From a989c41e24de16a9b479703860dfc2de0eec558b Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 20 Jan 2023 15:03:58 +0000
Subject: [PATCH] build(deps): bump github.com/containers/storage from 1.32.1
 to 1.45.3

Bumps [github.com/containers/storage](https://github.com/containers/storage) from 1.32.1 to 1.45.3.
- [Release notes](https://github.com/containers/storage/releases)
- [Changelog](https://github.com/containers/storage/blob/main/docs/containers-storage-changes.md)
- [Commits](https://github.com/containers/storage/compare/v1.32.1...v1.45.3)

---
updated-dependencies:
- dependency-name: github.com/containers/storage
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot]
---
 go.mod | 20 +-
 go.sum | 206 +-
 vendor/github.com/BurntSushi/toml/.gitignore | 7 +-
 vendor/github.com/BurntSushi/toml/.travis.yml | 15 -
 vendor/github.com/BurntSushi/toml/COMPATIBLE | 3 -
 vendor/github.com/BurntSushi/toml/Makefile | 19 -
 vendor/github.com/BurntSushi/toml/README.md | 230 +-
 vendor/github.com/BurntSushi/toml/decode.go | 505 +-
 .../BurntSushi/toml/decode_go116.go | 19 +
 .../github.com/BurntSushi/toml/deprecated.go | 21 +
 vendor/github.com/BurntSushi/toml/doc.go | 36 +-
 vendor/github.com/BurntSushi/toml/encode.go | 606 ++-
 .../BurntSushi/toml/encoding_types.go | 19 -
 .../BurntSushi/toml/encoding_types_1.1.go | 18 -
 vendor/github.com/BurntSushi/toml/error.go | 279 ++
 vendor/github.com/BurntSushi/toml/go.mod | 3 +
 .../github.com/BurntSushi/toml/internal/tz.go | 36 +
 vendor/github.com/BurntSushi/toml/lex.go | 770 +++-
 .../toml/{decode_meta.go => meta.go} | 124 +-
 vendor/github.com/BurntSushi/toml/parse.go | 681 ++-
 vendor/github.com/BurntSushi/toml/session.vim | 1 -
 .../github.com/BurntSushi/toml/type_fields.go | 4 +-
 .../toml/{type_check.go => type_toml.go} | 23 +-
 .../github.com/Microsoft/go-winio/README.md | 27 +-
 .../Microsoft/go-winio/backuptar/tar.go | 189 +-
 vendor/github.com/Microsoft/go-winio/file.go | 6 +
 vendor/github.com/Microsoft/go-winio/go.mod | 3 +-
 vendor/github.com/Microsoft/go-winio/go.sum | 3 -
 .../github.com/Microsoft/go-winio/hvsock.go | 17 +-
 .../Microsoft/go-winio/pkg/guid/guid.go | 9 -
 .../go-winio/pkg/guid/guid_nonwindows.go | 15 +
 .../go-winio/pkg/guid/guid_windows.go | 10 +
 .../pkg/security/grantvmgroupaccess.go | 15 +-
 .../go-winio/pkg/security/syscall_windows.go | 6 +-
 .../go-winio/pkg/security/zsyscall_windows.go | 24 +-
 .../github.com/Microsoft/go-winio/vhd/vhd.go | 67 +-
 .../Microsoft/go-winio/vhd/zvhd_windows.go | 52 +-
 .../github.com/Microsoft/hcsshim/.gitignore | 39 +-
 .../Microsoft/hcsshim/.golangci.yml | 99 +
 vendor/github.com/Microsoft/hcsshim/Makefile | 87 +
 vendor/github.com/Microsoft/hcsshim/README.md | 82 +-
 vendor/github.com/Microsoft/hcsshim/errors.go | 6 +-
 vendor/github.com/Microsoft/hcsshim/go.mod | 26 +-
 vendor/github.com/Microsoft/hcsshim/go.sum | 136 +-
 .../Microsoft/hcsshim/hnsendpoint.go | 8 +
 .../Microsoft/hcsshim/internal/cow/cow.go | 6 +
 .../Microsoft/hcsshim/internal/hcs/errors.go | 34 +-
 .../Microsoft/hcsshim/internal/hcs/process.go | 56 +-
 .../internal/hcs/schema2/attachment.go | 6 +
 .../hcsshim/internal/hcs/schema2/container.go | 2 +
 .../internal/hcs/schema2/cpu_group_config.go | 2 +-
 .../hcsshim/internal/hcs/schema2/device.go | 8 +-
 .../model_container_definition_device.go | 14 +
 .../hcs/schema2/model_device_category.go | 15 +
 .../hcs/schema2/model_device_extension.go | 15 +
 .../hcs/schema2/model_device_instance.go | 17 +
 .../hcs/schema2/model_device_namespace.go | 16 +
 .../hcs/schema2/model_interface_class.go | 16 +
 .../internal/hcs/schema2/model_namespace.go | 15 +
 .../hcs/schema2/model_object_directory.go | 18 +
 .../hcs/schema2/model_object_namespace.go | 16 +
 .../hcs/schema2/model_object_symlink.go | 18 +
 .../hcs/schema2/virtual_p_mem_mapping.go | 15 +
 .../Microsoft/hcsshim/internal/hcs/system.go | 200 +-
 .../hcsshim/internal/hns/hnsendpoint.go | 30 +
 .../hcsshim/internal/hns/hnspolicy.go | 33 +-
 .../hcsshim/internal/jobobject/iocp.go | 111 +
 .../hcsshim/internal/jobobject/jobobject.go | 538 +++
 .../hcsshim/internal/jobobject/limits.go | 315 ++
 .../Microsoft/hcsshim/internal/queue/mq.go | 92 +
 .../hcsshim/internal/wclayer/activatelayer.go | 2 +-
 .../hcsshim/internal/wclayer/createlayer.go | 2 +-
 .../internal/wclayer/createscratchlayer.go | 2 +-
 .../hcsshim/internal/wclayer/destroylayer.go | 2 +-
 .../internal/wclayer/expandscratchsize.go | 2 +-
 .../hcsshim/internal/wclayer/exportlayer.go | 2 +-
 .../internal/wclayer/getlayermountpath.go | 4 +-
 .../internal/wclayer/getsharedbaseimages.go | 2 +-
 .../hcsshim/internal/wclayer/grantvmaccess.go | 2 +-
 .../hcsshim/internal/wclayer/importlayer.go | 2 +-
 .../hcsshim/internal/wclayer/layerexists.go | 2 +-
 .../hcsshim/internal/wclayer/legacy.go | 2 +-
 .../hcsshim/internal/wclayer/nametoguid.go | 4 +-
 .../hcsshim/internal/wclayer/preparelayer.go | 2 +-
 .../internal/wclayer/unpreparelayer.go | 2 +-
 .../hcsshim/internal/winapi/console.go | 44 +
 .../Microsoft/hcsshim/internal/winapi/iocp.go | 3 -
 .../hcsshim/internal/winapi/jobobject.go | 11 +-
 .../hcsshim/internal/winapi/memory.go | 23 -
 .../hcsshim/internal/winapi/process.go | 65 +-
 .../hcsshim/internal/winapi/system.go | 3 +-
 .../hcsshim/internal/winapi/utils.go | 25 +-
 .../hcsshim/internal/winapi/winapi.go | 2 +-
 .../internal/winapi/zsyscall_windows.go | 93 +-
 .../hcsshim/osversion/windowsbuilds.go | 12 +
 .../github.com/containers/storage/.cirrus.yml | 48 +-
 .../containers/storage/CODE-OF-CONDUCT.md | 2 +-
 vendor/github.com/containers/storage/Makefile | 35 +-
 .../github.com/containers/storage/SECURITY.md | 2 +-
 vendor/github.com/containers/storage/VERSION | 2 +-
 .../github.com/containers/storage/Vagrantfile | 25 -
 .../containers/storage/containers.go | 666 ++-
 .../containers/storage/deprecated.go | 216 +
 .../containers/storage/drivers/aufs/aufs.go | 58 +-
 .../containers/storage/drivers/aufs/dirs.go | 4 +-
 .../containers/storage/drivers/aufs/mount.go | 1 +
 .../storage/drivers/aufs/mount_unsupported.go | 12 -
 .../containers/storage/drivers/btrfs/btrfs.go | 64 +-
 .../drivers/btrfs/dummy_unsupported.go | 1 +
 .../storage/drivers/btrfs/version.go | 1 +
 .../storage/drivers/btrfs/version_none.go | 3 +-
 .../containers/storage/drivers/chown.go | 16 +-
 .../storage/drivers/chown_darwin.go | 109 +
 .../containers/storage/drivers/chown_unix.go | 72 +-
 .../storage/drivers/chown_windows.go | 10 +-
 .../containers/storage/drivers/chroot_unix.go | 5 +-
 .../storage/drivers/chroot_windows.go | 2 +-
 .../storage/drivers/copy/copy_linux.go | 6 +-
 .../storage/drivers/copy/copy_unsupported.go | 7 +-
 .../containers/storage/drivers/counter.go | 5 +
 .../storage/drivers/devmapper/device_setup.go | 58 +-
 .../storage/drivers/devmapper/deviceset.go | 124 +-
 .../drivers/devmapper/devmapper_doc.go | 1 +
 .../storage/drivers/devmapper/driver.go | 11 +-
 .../storage/drivers/devmapper/jsoniter.go | 3 +
 .../storage/drivers/devmapper/mount.go | 1 +
 .../containers/storage/drivers/driver.go | 73 +-
 .../storage/drivers/driver_darwin.go | 14 +
 .../storage/drivers/driver_freebsd.go | 28 +
 .../storage/drivers/driver_linux.go | 66 +-
 .../storage/drivers/driver_solaris.go | 1 +
 .../storage/drivers/driver_unsupported.go | 3 +-
 .../containers/storage/drivers/fsdiff.go | 45 +-
 .../storage/drivers/overlay/check.go | 96 +-
 .../storage/drivers/overlay/check_116.go | 45 +
 .../storage/drivers/overlay/idmapped_utils.go | 155 +
 .../storage/drivers/overlay/jsoniter.go | 3 +
 .../storage/drivers/overlay/mount.go | 106 +-
 .../storage/drivers/overlay/overlay.go | 709 ++-
 .../storage/drivers/overlay/overlay_cgo.go | 1 +
 .../storage/drivers/overlay/overlay_nocgo.go | 1 +
 .../drivers/overlay/overlay_unsupported.go | 1 +
 .../storage/drivers/overlay/randomid.go | 5 +-
 .../drivers/overlayutils/overlayutils.go | 4 +-
 .../storage/drivers/quota/projectquota.go | 172 +-
 .../drivers/quota/projectquota_unsupported.go | 6 +-
 .../storage/drivers/register/register_aufs.go | 1 +
 .../drivers/register/register_btrfs.go | 1 +
 .../drivers/register/register_devicemapper.go | 1 +
 .../drivers/register/register_overlay.go | 1 +
 .../storage/drivers/register/register_zfs.go | 3 +-
 .../containers/storage/drivers/template.go | 7 +-
 .../storage/drivers/vfs/copy_unsupported.go | 1 +
 .../containers/storage/drivers/vfs/driver.go | 46 +-
 .../storage/drivers/windows/windows.go | 41 +-
 .../containers/storage/drivers/zfs/zfs.go | 53 +-
 .../storage/drivers/zfs/zfs_freebsd.go | 22 +-
 .../storage/drivers/zfs/zfs_linux.go | 10 +-
 .../storage/drivers/zfs/zfs_unsupported.go | 9 +-
 .../github.com/containers/storage/errors.go | 7 +
 vendor/github.com/containers/storage/go.mod | 56 +-
 vendor/github.com/containers/storage/go.sum | 199 +-
 vendor/github.com/containers/storage/idset.go | 49 +-
 .../github.com/containers/storage/images.go | 673 ++-
 .../github.com/containers/storage/layers.go | 1514 ++++--
 .../containers/storage/lockfile_compat.go | 5 +-
 .../containers/storage/pkg/archive/archive.go | 128 +-
 .../storage/pkg/archive/archive_110.go | 1 +
 .../storage/pkg/archive/archive_19.go | 1 +
 .../storage/pkg/archive/archive_bsd.go | 19 +
 .../storage/pkg/archive/archive_freebsd.go | 125 -
 .../storage/pkg/archive/archive_linux.go | 21 +-
 .../storage/pkg/archive/archive_other.go | 1 +
 .../storage/pkg/archive/archive_unix.go | 40 +-
 .../storage/pkg/archive/archive_windows.go | 8 +-
 .../containers/storage/pkg/archive/changes.go | 5 +-
 .../storage/pkg/archive/changes_other.go | 15 +-
 .../storage/pkg/archive/changes_unix.go | 2 +
 .../containers/storage/pkg/archive/copy.go | 3 +-
 .../storage/pkg/archive/copy_unix.go | 1 +
 .../containers/storage/pkg/archive/diff.go | 31 +-
 .../storage/pkg/archive/fflags_bsd.go | 167 +
 .../storage/pkg/archive/fflags_unsupported.go | 21 +
 .../storage/pkg/archive/time_unsupported.go | 1 +
 .../storage/pkg/chrootarchive/archive.go | 30 +-
 .../pkg/chrootarchive/archive_darwin.go | 17 +
 .../storage/pkg/chrootarchive/archive_unix.go | 26 +-
 .../pkg/chrootarchive/archive_windows.go | 2 +-
 .../storage/pkg/chrootarchive/chroot_linux.go | 28 +-
 .../storage/pkg/chrootarchive/chroot_unix.go | 3 +-
 .../storage/pkg/chrootarchive/diff_darwin.go | 40 +
 .../storage/pkg/chrootarchive/diff_unix.go | 22 +-
 .../storage/pkg/chrootarchive/diff_windows.go | 3 +-
 .../storage/pkg/chrootarchive/init_darwin.go | 4 +
 .../storage/pkg/chrootarchive/init_unix.go | 6 +-
 .../storage/pkg/chrootarchive/jsoniter.go | 3 +
 .../containers/storage/pkg/config/config.go | 109 +-
 .../storage/pkg/devicemapper/devmapper.go | 3 +-
 .../storage/pkg/devicemapper/devmapper_log.go | 1 +
 .../pkg/devicemapper/devmapper_wrapper.go | 1 +
 .../devmapper_wrapper_deferred_remove.go | 1 +
 .../devicemapper/devmapper_wrapper_dynamic.go | 1 +
 .../devmapper_wrapper_no_deferred_remove.go | 1 +
 .../devicemapper/devmapper_wrapper_static.go | 1 +
 .../storage/pkg/devicemapper/ioctl.go | 1 +
 .../storage/pkg/directory/directory.go | 3 +-
 .../storage/pkg/directory/directory_unix.go | 12 +-
 .../pkg/directory/directory_windows.go | 17 +-
 .../storage/pkg/dmesg/dmesg_linux.go | 1 +
 .../storage/pkg/fileutils/fileutils.go | 16 +-
 .../storage/pkg/fileutils/fileutils_unix.go | 6 +-
 .../storage/pkg/fsutils/fsutils_linux.go | 6 +-
 .../containers/storage/pkg/homedir/homedir.go | 52 +
 .../storage/pkg/homedir/homedir_others.go | 16 +-
 .../storage/pkg/homedir/homedir_unix.go | 50 +-
 .../storage/pkg/homedir/homedir_windows.go | 7 +-
 .../containers/storage/pkg/idtools/idtools.go | 177 +-
 .../storage/pkg/idtools/idtools_supported.go | 87 +
 .../storage/pkg/idtools/idtools_unix.go | 4 +
 .../pkg/idtools/idtools_unsupported.go | 12 +
 .../storage/pkg/idtools/idtools_windows.go | 1 +
 .../containers/storage/pkg/idtools/parser.go | 8 +-
 .../storage/pkg/idtools/usergroupadd_linux.go | 45 +-
 .../pkg/idtools/usergroupadd_unsupported.go | 3 +-
 .../storage/pkg/idtools/utils_unix.go | 3 +-
 .../storage/pkg/ioutils/fswriters.go | 50 +-
 .../pkg/ioutils/fswriters_unsupported.go | 1 +
 .../containers/storage/pkg/ioutils/readers.go | 17 +
 .../storage/pkg/ioutils/temp_unix.go | 7 +-
 .../storage/pkg/ioutils/temp_windows.go | 7 +-
 .../storage/pkg/lockfile/lockfile.go | 83 +-
 .../storage/pkg/lockfile/lockfile_unix.go | 350 +-
 .../storage/pkg/lockfile/lockfile_windows.go | 118 +-
 .../storage/pkg/loopback/attach_loopback.go | 11 +-
 .../containers/storage/pkg/loopback/ioctl.go | 1 +
 .../storage/pkg/loopback/loop_wrapper.go | 1 +
 .../storage/pkg/loopback/loopback.go | 5 +-
 .../containers/storage/pkg/mount/flags.go | 4 +-
 .../storage/pkg/mount/flags_freebsd.go | 48 +
 .../storage/pkg/mount/flags_unsupported.go | 3 +-
 .../storage/pkg/mount/mounter_freebsd.go | 26 +-
 .../storage/pkg/mount/mounter_unsupported.go | 4 +-
 .../storage/pkg/mount/unmount_unix.go | 27 +-
 .../storage/pkg/mount/unmount_unsupported.go | 1 +
 .../storage/pkg/parsers/kernel/kernel.go | 1 +
 .../pkg/parsers/kernel/kernel_darwin.go | 7 +-
 .../storage/pkg/parsers/kernel/kernel_unix.go | 3 +-
 .../pkg/parsers/kernel/kernel_windows.go | 1 +
 .../pkg/parsers/kernel/uname_freebsd.go | 17 +
 .../pkg/parsers/kernel/uname_unsupported.go | 16 +-
 .../parsers/kernel/uname_unsupported_type.go | 11 +
 .../containers/storage/pkg/parsers/parsers.go | 2 +-
 .../storage/pkg/reexec/command_freebsd.go | 38 +
 .../storage/pkg/reexec/command_linux.go | 3 +
 .../storage/pkg/reexec/command_unix.go | 5 +-
 .../storage/pkg/reexec/command_unsupported.go | 3 +
 .../storage/pkg/reexec/command_windows.go | 3 +
 .../containers/storage/pkg/reexec/reexec.go | 21 +-
 .../containers/storage/pkg/regexp/regexp.go | 214 +
 .../pkg/regexp/regexp_dontprecompile.go | 6 +
 .../storage/pkg/regexp/regexp_precompile.go | 6 +
 .../storage/pkg/stringid/stringid.go | 19 +-
 .../storage/pkg/system/chtimes_unix.go | 1 +
 .../storage/pkg/system/chtimes_windows.go | 1 +
 .../containers/storage/pkg/system/init.go | 2 +-
 .../storage/pkg/system/lchflags_bsd.go | 56 +
 .../storage/pkg/system/lcow_unix.go | 1 +
 .../storage/pkg/system/lstat_unix.go | 3 +-
 .../storage/pkg/system/meminfo_freebsd.go | 85 +
 .../storage/pkg/system/meminfo_solaris.go | 3 +-
 .../storage/pkg/system/meminfo_unsupported.go | 6 +-
 .../containers/storage/pkg/system/mknod.go | 5 +-
 .../storage/pkg/system/mknod_freebsd.go | 5 +-
 .../storage/pkg/system/mknod_windows.go | 1 +
 .../storage/pkg/system/path_unix.go | 1 +
 .../storage/pkg/system/path_windows.go | 5 +-
 .../storage/pkg/system/process_unix.go | 1 +
 .../containers/storage/pkg/system/rm.go | 20 +-
 .../storage/pkg/system/rm_common.go | 10 +
 .../storage/pkg/system/rm_freebsd.go | 17 +
 .../storage/pkg/system/stat_common.go | 13 +
 .../storage/pkg/system/stat_freebsd.go | 17 +-
 .../storage/pkg/system/stat_linux.go | 3 +-
 .../storage/pkg/system/stat_unix.go | 8 +
 .../storage/pkg/system/stat_windows.go | 6 +
 .../storage/pkg/system/syscall_unix.go | 14 +-
 .../storage/pkg/system/syscall_windows.go | 5 +
 .../containers/storage/pkg/system/umask.go | 1 +
 .../storage/pkg/system/umask_windows.go | 1 +
 .../storage/pkg/system/utimes_freebsd.go | 5 +-
 .../storage/pkg/system/utimes_unsupported.go | 1 +
 .../storage/pkg/system/xattrs_darwin.go | 84 +
 .../storage/pkg/system/xattrs_linux.go | 3 +
 .../storage/pkg/system/xattrs_unsupported.go | 6 +-
 .../storage/pkg/tarlog/tarlogger.go | 2 +-
 .../storage/pkg/truncindex/truncindex.go | 10 +-
 .../storage/pkg/unshare/getenv_linux_cgo.go | 1 +
 .../storage/pkg/unshare/getenv_linux_nocgo.go | 1 +
 .../containers/storage/pkg/unshare/unshare.c | 2 +-
 .../containers/storage/pkg/unshare/unshare.go | 26 +-
 .../storage/pkg/unshare/unshare_cgo.go | 5 +-
 .../storage/pkg/unshare/unshare_darwin.go | 54 +
 .../storage/pkg/unshare/unshare_freebsd.c | 76 +
 .../storage/pkg/unshare/unshare_freebsd.go | 179 +
 .../storage/pkg/unshare/unshare_gccgo.go | 3 +-
 .../storage/pkg/unshare/unshare_linux.go | 217 +-
 .../pkg/unshare/unshare_unsupported.go | 8 +-
 .../pkg/unshare/unshare_unsupported_cgo.go | 3 +-
 .../containers/storage/storage.conf | 50 +-
 .../containers/storage/storage.conf-freebsd | 205 +
 vendor/github.com/containers/storage/store.go | 3036 ++++++------
 .../storage/types/default_override_test.conf | 11 +
 .../containers/storage/types/errors.go | 4 +
 .../containers/storage/types/idmappings.go | 7 +-
 .../containers/storage/types/options.go | 288 +-
 .../storage/types/options_darwin.go | 13 +
 .../storage/types/options_freebsd.go | 14 +
 .../containers/storage/types/options_linux.go | 14 +
 .../storage/types/options_windows.go | 14 +
 .../storage/types/storage_broken.conf | 18 +
 .../containers/storage/types/utils.go | 33 +-
 .../github.com/containers/storage/userns.go | 69 +-
 vendor/github.com/containers/storage/utils.go | 49 +-
 .../coreos/go-systemd/v22/dbus/dbus.go | 36 +-
 .../coreos/go-systemd/v22/dbus/methods.go | 303 +-
 .../cyphar/filepath-securejoin/.travis.yml | 8 +-
 .../cyphar/filepath-securejoin/README.md | 20 +-
 .../cyphar/filepath-securejoin/VERSION | 2 +-
 .../cyphar/filepath-securejoin/go.mod | 3 +
 .../cyphar/filepath-securejoin/join.go | 25 +-
 .../cyphar/filepath-securejoin/vendor.conf | 1 -
 vendor/github.com/docker/go-units/size.go | 70 +-
 .../dbus/v5/{README.markdown => README.md} | 12 +-
 vendor/github.com/godbus/dbus/v5/auth.go | 15 +-
 vendor/github.com/godbus/dbus/v5/conn.go | 36 +-
 vendor/github.com/godbus/dbus/v5/decoder.go | 16 +-
 vendor/github.com/godbus/dbus/v5/encoder.go | 61 +-
 vendor/github.com/godbus/dbus/v5/export.go | 37 +-
 vendor/github.com/godbus/dbus/v5/message.go | 73 +-
 vendor/github.com/godbus/dbus/v5/sig.go | 66 +-
 .../godbus/dbus/v5/transport_generic.go | 10 +-
 .../godbus/dbus/v5/transport_unix.go | 24 +-
 .../dbus/v5/transport_unixcred_netbsd.go | 14 +
 vendor/github.com/golang/snappy/.gitignore | 16 -
 vendor/github.com/golang/snappy/AUTHORS | 17 -
 vendor/github.com/golang/snappy/CONTRIBUTORS | 39 -
 vendor/github.com/golang/snappy/README | 107 -
 .../github.com/golang/snappy/decode_amd64.s | 490 --
 .../github.com/golang/snappy/decode_arm64.s | 494 --
 vendor/github.com/golang/snappy/decode_asm.go | 15 -
 .../github.com/golang/snappy/encode_amd64.s | 730 ---
 .../github.com/golang/snappy/encode_arm64.s | 722 ---
 vendor/github.com/golang/snappy/encode_asm.go | 30 -
 vendor/github.com/golang/snappy/go.mod | 1 -
 vendor/github.com/imdario/mergo/.travis.yml | 3 +
 vendor/github.com/imdario/mergo/README.md | 2 +-
 vendor/github.com/imdario/mergo/merge.go | 9 +-
 vendor/github.com/json-iterator/go/README.md | 2 -
 vendor/github.com/json-iterator/go/go.mod | 2 +-
 vendor/github.com/json-iterator/go/go.sum | 5 +-
 .../klauspost/compress/.gitattributes | 2 +
 .../bitset => klauspost/compress}/.gitignore | 8 +-
 .../klauspost/compress/.goreleaser.yml | 141 +
 vendor/github.com/klauspost/compress/LICENSE | 276 ++
 .../github.com/klauspost/compress/README.md | 593 +++
 .../klauspost/compress/compressible.go | 85 +
 .../klauspost/compress/flate/deflate.go | 204 +-
 .../klauspost/compress/flate/dict_decoder.go | 24 +-
 .../klauspost/compress/flate/fast_encoder.go | 86 +-
 .../compress/flate/huffman_bit_writer.go | 371 +-
 .../klauspost/compress/flate/huffman_code.go | 121 +-
 .../klauspost/compress/flate/inflate.go | 257 +-
 .../klauspost/compress/flate/inflate_gen.go | 729 ++-
 .../klauspost/compress/flate/level1.go | 96 +-
 .../klauspost/compress/flate/level2.go | 45 +-
 .../klauspost/compress/flate/level3.go | 80 +-
 .../klauspost/compress/flate/level4.go | 21 +-
 .../klauspost/compress/flate/level5.go | 38 +-
 .../klauspost/compress/flate/level6.go | 40 +-
 .../klauspost/compress/flate/regmask_other.go | 3 +-
 .../klauspost/compress/flate/stateless.go | 33 +-
 .../klauspost/compress/flate/token.go | 49 +-
 vendor/github.com/klauspost/compress/gen.sh | 4 +
 vendor/github.com/klauspost/compress/go.mod | 3 +
 .../bitset => klauspost/compress}/go.sum | 0
 .../klauspost/compress/huff0/bitreader.go | 122 +-
 .../klauspost/compress/huff0/bitwriter.go | 115 -
 .../klauspost/compress/huff0/bytereader.go | 10 -
 .../klauspost/compress/huff0/compress.go | 98 +-
 .../klauspost/compress/huff0/decompress.go | 1015 ++--
 .../compress/huff0/decompress_amd64.go | 226 +
 .../compress/huff0/decompress_amd64.s | 846 ++++
 .../compress/huff0/decompress_generic.go | 299 ++
 .../klauspost/compress/huff0/huff0.go | 64 +
 .../compress/internal/cpuinfo/cpuinfo.go | 34 +
 .../internal/cpuinfo/cpuinfo_amd64.go | 11 +
 .../compress/internal/cpuinfo/cpuinfo_amd64.s | 36 +
 .../compress/internal/snapref}/LICENSE | 0
 .../compress/internal/snapref}/decode.go | 85 +-
 .../internal/snapref}/decode_other.go | 4 +-
 .../compress/internal/snapref}/encode.go | 2 +-
 .../internal/snapref}/encode_other.go | 10 +-
 .../compress/internal/snapref}/snappy.go | 4 +-
 vendor/github.com/klauspost/compress/s2sx.mod | 4 +
 vendor/github.com/klauspost/compress/s2sx.sum | 0
 .../klauspost/compress/zstd/README.md | 182 +-
 .../klauspost/compress/zstd/bitreader.go | 12 +-
 .../klauspost/compress/zstd/bitwriter.go | 98 +-
 .../klauspost/compress/zstd/blockdec.go | 537 ++-
 .../klauspost/compress/zstd/blockenc.go | 160 +-
 .../klauspost/compress/zstd/bytebuf.go | 25 +-
 .../klauspost/compress/zstd/bytereader.go | 6 -
 .../klauspost/compress/zstd/decodeheader.go | 93 +-
 .../klauspost/compress/zstd/decoder.go | 704 ++-
 .../compress/zstd/decoder_options.go | 96 +-
 .../klauspost/compress/zstd/dict.go | 7 +-
 .../klauspost/compress/zstd/enc_base.go | 18 +-
 .../klauspost/compress/zstd/enc_best.go | 217 +-
 .../klauspost/compress/zstd/enc_better.go | 115 +-
 .../klauspost/compress/zstd/enc_dfast.go | 102 +-
 .../klauspost/compress/zstd/enc_fast.go | 220 +-
 .../klauspost/compress/zstd/encoder.go | 154 +-
 .../compress/zstd/encoder_options.go | 30 +-
 .../klauspost/compress/zstd/framedec.go | 400 +-
 .../klauspost/compress/zstd/fse_decoder.go | 128 +-
 .../compress/zstd/fse_decoder_amd64.go | 65 +
 .../compress/zstd/fse_decoder_amd64.s | 126 +
 .../compress/zstd/fse_decoder_generic.go | 72 +
 .../klauspost/compress/zstd/fse_encoder.go | 30 +-
 .../klauspost/compress/zstd/hash.go | 66 +-
 .../klauspost/compress/zstd/history.go | 67 +-
 .../compress/zstd/internal/xxhash/README.md | 49 +-
 .../compress/zstd/internal/xxhash/xxhash.go | 48 +-
 .../zstd/internal/xxhash/xxhash_amd64.s | 337 +-
 .../zstd/internal/xxhash/xxhash_arm64.s | 184 +
 .../xxhash/{xxhash_amd64.go => xxhash_asm.go} | 5 +-
 .../zstd/internal/xxhash/xxhash_other.go | 22 +-
 .../klauspost/compress/zstd/seqdec.go | 327 +-
 .../klauspost/compress/zstd/seqdec_amd64.go | 379 ++
 .../klauspost/compress/zstd/seqdec_amd64.s | 4079 +++++++++++++++++
 .../klauspost/compress/zstd/seqdec_generic.go | 237 +
 .../klauspost/compress/zstd/snappy.go | 6 +-
 .../github.com/klauspost/compress/zstd/zip.go | 78 +-
 .../klauspost/compress/zstd/zstd.go | 60 +-
 .../mattn/go-shellwords/shellwords.go | 2 +-
 vendor/github.com/mistifyio/go-zfs/.gitignore | 1 -
 .../github.com/mistifyio/go-zfs/.travis.yml | 43 -
 .../github.com/mistifyio/go-zfs/Vagrantfile | 34 -
 .../mistifyio/go-zfs/utils_notsolaris.go | 17 -
 .../mistifyio/go-zfs/utils_solaris.go | 17 -
 vendor/github.com/mistifyio/go-zfs/v3/.envrc | 4 +
 .../github.com/mistifyio/go-zfs/v3/.gitignore | 6 +
 .../mistifyio/go-zfs/v3/.golangci.yml | 207 +
 .../github.com/mistifyio/go-zfs/v3/.yamllint | 16 +
 .../mistifyio/go-zfs/v3/CHANGELOG.md | 250 +
 .../mistifyio/go-zfs/{ => v3}/CONTRIBUTING.md | 44 +-
 .../mistifyio/go-zfs/{ => v3}/LICENSE | 0
 .../github.com/mistifyio/go-zfs/v3/Makefile | 19 +
 .../mistifyio/go-zfs/{ => v3}/README.md | 13 +-
 .../mistifyio/go-zfs/v3/Vagrantfile | 33 +
 .../mistifyio/go-zfs/{ => v3}/error.go | 0
 vendor/github.com/mistifyio/go-zfs/v3/go.mod | 5 +
 vendor/github.com/mistifyio/go-zfs/v3/go.sum | 2 +
 vendor/github.com/mistifyio/go-zfs/v3/lint.mk | 75 +
 .../github.com/mistifyio/go-zfs/v3/rules.mk | 49 +
 .../github.com/mistifyio/go-zfs/v3/shell.nix | 26 +
 .../mistifyio/go-zfs/{ => v3}/utils.go | 90 +-
 .../mistifyio/go-zfs/v3/utils_notsolaris.go | 19 +
 .../mistifyio/go-zfs/v3/utils_solaris.go | 19 +
 .../mistifyio/go-zfs/{ => v3}/zfs.go | 165 +-
 .../mistifyio/go-zfs/{ => v3}/zpool.go | 37 +-
 vendor/github.com/moby/sys/mountinfo/go.mod | 4 +-
 vendor/github.com/moby/sys/mountinfo/go.sum | 4 +-
 .../moby/sys/mountinfo/mounted_linux.go | 63 +-
 .../moby/sys/mountinfo/mounted_unix.go | 23 +-
 .../moby/sys/mountinfo/mountinfo.go | 9 +-
 .../moby/sys/mountinfo/mountinfo_bsd.go | 47 +-
 .../sys/mountinfo/mountinfo_freebsdlike.go | 14 +
 .../moby/sys/mountinfo/mountinfo_linux.go | 27 +-
 .../moby/sys/mountinfo/mountinfo_openbsd.go | 11 +
 .../sys/mountinfo/mountinfo_unsupported.go | 3 +-
 .../github.com/modern-go/reflect2/.travis.yml | 2 +-
 .../github.com/modern-go/reflect2/Gopkg.lock | 8 +-
 .../github.com/modern-go/reflect2/Gopkg.toml | 4 -
 vendor/github.com/modern-go/reflect2/go.mod | 3 +
 .../modern-go/reflect2/go_above_118.go | 23 +
 .../modern-go/reflect2/go_above_17.go | 8 -
 .../modern-go/reflect2/go_above_19.go | 3 +
 .../modern-go/reflect2/go_below_118.go | 21 +
 .../modern-go/reflect2/go_below_17.go | 9 -
 .../modern-go/reflect2/go_below_19.go | 14 -
 .../github.com/modern-go/reflect2/reflect2.go | 20 +-
 vendor/github.com/modern-go/reflect2/test.sh | 12 -
 .../github.com/modern-go/reflect2/type_map.go | 51 +-
 .../modern-go/reflect2/unsafe_link.go | 26 +-
 .../modern-go/reflect2/unsafe_map.go | 8 -
 .../runc/libcontainer/apparmor/apparmor.go | 16 +
 .../libcontainer/apparmor/apparmor_linux.go | 16 +-
 .../apparmor/apparmor_unsupported.go | 11 +-
 .../runc/libcontainer/cgroups/cgroups.go | 2 -
 .../cgroups/cgroups_unsupported.go | 3 -
 .../runc/libcontainer/cgroups/file.go | 190 +
 .../libcontainer/cgroups/fscommon/fscommon.go | 51 -
 .../libcontainer/cgroups/fscommon/open.go | 120 -
 .../libcontainer/cgroups/fscommon/utils.go | 122 -
 .../runc/libcontainer/cgroups/getallpids.go | 27 +
 .../runc/libcontainer/cgroups/stats.go | 16 +-
 .../runc/libcontainer/cgroups/utils.go | 121 +-
 .../runc/libcontainer/cgroups/v1_utils.go | 19 +-
 .../runc/libcontainer/configs/cgroup_linux.go | 42 +-
 .../configs/cgroup_unsupported.go | 5 +-
 .../runc/libcontainer/configs/config.go | 44 +-
 .../runc/libcontainer/configs/config_linux.go | 17 +-
 .../libcontainer/configs/configs_fuzzer.go | 1 +
 .../runc/libcontainer/configs/devices.go | 17 -
 .../runc/libcontainer/configs/intelrdt.go | 3 +
 .../runc/libcontainer/configs/mount.go | 11 +-
 .../configs/namespaces_syscall.go | 1 +
 .../configs/namespaces_syscall_unsupported.go | 1 +
 .../configs/namespaces_unsupported.go | 4 +-
 .../runc/libcontainer/configs/network.go | 13 +-
 .../runc/libcontainer/configs/rdma.go | 9 +
 .../runc/libcontainer/devices/device_unix.go | 23 +-
 .../runc/libcontainer/system/linux.go | 101 -
 .../runc/libcontainer/system/proc.go | 103 -
 .../libcontainer/system/syscall_linux_32.go | 26 -
 .../libcontainer/system/syscall_linux_64.go | 26 -
 .../libcontainer/system/userns_deprecated.go | 5 -
 .../runc/libcontainer/system/xattrs_linux.go | 35 -
 .../runc/libcontainer/user/lookup_unix.go | 1 +
 .../runc/libcontainer/user/user.go | 139 +-
 .../runc/libcontainer/user/user_fuzzer.go | 1 +
 .../runc/libcontainer/userns/userns_fuzzer.go | 1 +
 .../libcontainer/userns/userns_unsupported.go | 1 +
 .../runc/libcontainer/utils/cmsg.go | 11 +-
 .../runc/libcontainer/utils/utils.go | 14 +-
 .../runc/libcontainer/utils/utils_unix.go | 5 +-
 .../opencontainers/selinux/go-selinux/doc.go | 1 -
 .../selinux/go-selinux/label/label_linux.go | 57 +-
 .../selinux/go-selinux/label/label_stub.go | 1 +
 .../selinux/go-selinux/rchcon.go | 34 +
 .../selinux/go-selinux/rchcon_go115.go | 22 +
 .../selinux/go-selinux/selinux.go | 26 +-
 .../selinux/go-selinux/selinux_linux.go | 362 +-
 .../selinux/go-selinux/selinux_stub.go | 15 +-
 .../selinux/go-selinux/xattrs_linux.go | 37 +-
 .../selinux/pkg/pwalk/README.md | 6 +
 .../opencontainers/selinux/pkg/pwalk/pwalk.go | 21 +-
 .../selinux/pkg/pwalkdir/README.md | 54 +
 .../selinux/pkg/pwalkdir/pwalkdir.go | 116 +
 .../seccomp/libseccomp-golang/.golangci.yml | 4 +
 .../seccomp/libseccomp-golang/.travis.yml | 37 -
 .../seccomp/libseccomp-golang/CONTRIBUTING.md | 26 +-
 .../seccomp/libseccomp-golang/Makefile | 15 +-
 .../seccomp/libseccomp-golang/README.md | 24 +-
 .../seccomp/libseccomp-golang/SECURITY.md | 47 +
 .../seccomp/libseccomp-golang/go.sum | 23 -
 .../seccomp/libseccomp-golang/seccomp.go | 408 +-
 .../libseccomp-golang/seccomp_internal.go | 379 +-
 vendor/github.com/sirupsen/logrus/README.md | 4 +-
 .../github.com/sirupsen/logrus/buffer_pool.go | 9 -
 vendor/github.com/sirupsen/logrus/entry.go | 21 +-
 vendor/github.com/sirupsen/logrus/go.mod | 5 +-
 vendor/github.com/sirupsen/logrus/go.sum | 14 +-
 vendor/github.com/sirupsen/logrus/logger.go | 13 +
 .../testify/assert/assertion_compare.go | 76 +-
 .../assert/assertion_compare_can_convert.go | 16 +
 .../assert/assertion_compare_legacy.go | 16 +
 .../testify/assert/assertion_format.go | 22 +
 .../testify/assert/assertion_forward.go | 44 +
 .../testify/assert/assertion_order.go | 8 +-
 .../stretchr/testify/assert/assertions.go | 190 +-
 .../stretchr/testify/require/require.go | 56 +
 .../testify/require/require_forward.go | 44 +
 vendor/github.com/ulikunitz/xz/.gitignore | 3 +
 vendor/github.com/ulikunitz/xz/LICENSE | 2 +-
 vendor/github.com/ulikunitz/xz/README.md | 4 +
 vendor/github.com/ulikunitz/xz/TODO.md | 11 +-
 vendor/github.com/ulikunitz/xz/bits.go | 2 +-
 vendor/github.com/ulikunitz/xz/crc.go | 2 +-
 vendor/github.com/ulikunitz/xz/format.go | 2 +-
 .../ulikunitz/xz/internal/hash/cyclic_poly.go | 2 +-
 .../ulikunitz/xz/internal/hash/doc.go | 2 +-
 .../ulikunitz/xz/internal/hash/rabin_karp.go | 2 +-
 .../ulikunitz/xz/internal/hash/roller.go | 2 +-
 .../ulikunitz/xz/internal/xlog/xlog.go | 5 +-
 .../github.com/ulikunitz/xz/lzma/bintree.go | 2 +-
 vendor/github.com/ulikunitz/xz/lzma/bitops.go | 2 +-
 .../github.com/ulikunitz/xz/lzma/breader.go | 2 +-
 vendor/github.com/ulikunitz/xz/lzma/buffer.go | 2 +-
 .../ulikunitz/xz/lzma/bytewriter.go | 2 +-
 .../github.com/ulikunitz/xz/lzma/decoder.go | 2 +-
 .../ulikunitz/xz/lzma/decoderdict.go | 2 +-
 .../ulikunitz/xz/lzma/directcodec.go | 2 +-
 .../github.com/ulikunitz/xz/lzma/distcodec.go | 2 +-
 .../github.com/ulikunitz/xz/lzma/encoder.go | 2 +-
 .../ulikunitz/xz/lzma/encoderdict.go | 2 +-
 .../github.com/ulikunitz/xz/lzma/hashtable.go | 2 +-
 vendor/github.com/ulikunitz/xz/lzma/header.go | 2 +-
 .../github.com/ulikunitz/xz/lzma/header2.go | 2 +-
 .../ulikunitz/xz/lzma/lengthcodec.go | 5 +-
 .../ulikunitz/xz/lzma/literalcodec.go | 2 +-
 .../ulikunitz/xz/lzma/matchalgorithm.go | 2 +-
 .../github.com/ulikunitz/xz/lzma/operation.go | 2 +-
 vendor/github.com/ulikunitz/xz/lzma/prob.go | 2 +-
 .../ulikunitz/xz/lzma/properties.go | 2 +-
 .../ulikunitz/xz/lzma/rangecodec.go | 2 +-
 vendor/github.com/ulikunitz/xz/lzma/reader.go | 4 +-
 .../github.com/ulikunitz/xz/lzma/reader2.go | 2 +-
 vendor/github.com/ulikunitz/xz/lzma/state.go | 2 +-
 .../ulikunitz/xz/lzma/treecodecs.go | 2 +-
 vendor/github.com/ulikunitz/xz/lzma/writer.go | 2 +-
 .../github.com/ulikunitz/xz/lzma/writer2.go | 2 +-
 vendor/github.com/ulikunitz/xz/lzmafilter.go | 2 +-
 vendor/github.com/ulikunitz/xz/none-check.go | 2 +-
 vendor/github.com/ulikunitz/xz/reader.go | 2 +-
 vendor/github.com/ulikunitz/xz/writer.go | 2 +-
 .../vbatts/tar-split/tar/storage/getter.go | 3 +-
 vendor/github.com/willf/bitset/.travis.yml | 37 -
 vendor/github.com/willf/bitset/LICENSE | 27 -
 vendor/github.com/willf/bitset/README.md | 94 -
 .../willf/bitset/azure-pipelines.yml | 39 -
 vendor/github.com/willf/bitset/bitset.go | 931 ----
 vendor/github.com/willf/bitset/go.mod | 3 -
 vendor/github.com/willf/bitset/popcnt.go | 53 -
 vendor/github.com/willf/bitset/popcnt_19.go | 45 -
 .../github.com/willf/bitset/popcnt_amd64.go | 68 -
 vendor/github.com/willf/bitset/popcnt_amd64.s | 104 -
 .../github.com/willf/bitset/popcnt_generic.go | 24 -
 .../willf/bitset/trailing_zeros_18.go | 14 -
 .../willf/bitset/trailing_zeros_19.go | 9 -
 vendor/go.etcd.io/bbolt/.gitignore | 2 +
 vendor/go.etcd.io/bbolt/.travis.yml | 3 +-
 vendor/go.etcd.io/bbolt/Makefile | 2 -
 vendor/go.etcd.io/bbolt/README.md | 5 +-
 vendor/go.etcd.io/bbolt/bolt_unix.go | 17 +-
 vendor/go.etcd.io/bbolt/compact.go | 114 +
 vendor/go.etcd.io/bbolt/db.go | 66 +-
 vendor/go.etcd.io/bbolt/freelist_hmap.go | 6 +-
 vendor/go.etcd.io/bbolt/go.mod | 2 +-
 vendor/go.etcd.io/bbolt/go.sum | 4 +-
 vendor/go.etcd.io/bbolt/mlock_unix.go | 36 +
 vendor/go.etcd.io/bbolt/mlock_windows.go | 11 +
 vendor/go.etcd.io/bbolt/tx.go | 3 +-
 vendor/golang.org/x/net/html/parse.go | 24 +-
 vendor/golang.org/x/net/http2/README | 20 -
 vendor/golang.org/x/net/http2/ascii.go | 53 +
 .../x/net/http2/client_conn_pool.go | 79 +-
 vendor/golang.org/x/net/http2/go115.go | 27 +
 vendor/golang.org/x/net/http2/headermap.go | 7 +-
 vendor/golang.org/x/net/http2/not_go115.go | 31 +
 vendor/golang.org/x/net/http2/server.go | 49 +-
 vendor/golang.org/x/net/http2/transport.go | 224 +-
 vendor/golang.org/x/net/http2/write.go | 7 +-
 vendor/golang.org/x/net/idna/idna10.0.0.go | 113 +-
 vendor/golang.org/x/net/idna/idna9.0.0.go | 93 +-
 .../golang.org/x/sys/plan9/pwd_go15_plan9.go | 1 +
 vendor/golang.org/x/sys/plan9/pwd_plan9.go | 1 +
 vendor/golang.org/x/sys/plan9/race.go | 1 +
 vendor/golang.org/x/sys/plan9/race0.go | 1 +
 vendor/golang.org/x/sys/plan9/str.go | 1 +
 vendor/golang.org/x/sys/plan9/syscall.go | 2 +
 .../golang.org/x/sys/plan9/syscall_plan9.go | 16 +-
 .../x/sys/plan9/zsyscall_plan9_386.go | 1 +
 .../x/sys/plan9/zsyscall_plan9_amd64.go | 1 +
 .../x/sys/plan9/zsyscall_plan9_arm.go | 1 +
 vendor/golang.org/x/sys/unix/README.md | 8 +-
 vendor/golang.org/x/sys/unix/asm_bsd_386.s | 4 +-
 vendor/golang.org/x/sys/unix/asm_bsd_arm.s | 4 +-
 .../golang.org/x/sys/unix/asm_bsd_riscv64.s | 29 +
 .../golang.org/x/sys/unix/asm_linux_loong64.s | 54 +
 vendor/golang.org/x/sys/unix/endian_little.go | 4 +-
 .../x/sys/unix/errors_freebsd_386.go | 233 -
 .../x/sys/unix/errors_freebsd_amd64.go | 233 -
 .../x/sys/unix/errors_freebsd_arm.go | 226 -
 .../x/sys/unix/errors_freebsd_arm64.go | 17 -
 vendor/golang.org/x/sys/unix/fdset.go | 4 +-
 vendor/golang.org/x/sys/unix/ifreq_linux.go | 142 +
 vendor/golang.org/x/sys/unix/ioctl_linux.go | 101 +-
 vendor/golang.org/x/sys/unix/mkall.sh | 27 +-
 vendor/golang.org/x/sys/unix/mkerrors.sh | 40 +-
 .../golang.org/x/sys/unix/sockcmsg_linux.go | 49 +
 vendor/golang.org/x/sys/unix/syscall_aix.go | 50 +-
 vendor/golang.org/x/sys/unix/syscall_bsd.go | 101 +-
 .../x/sys/unix/syscall_darwin.1_13.go | 4 +-
 .../golang.org/x/sys/unix/syscall_darwin.go | 150 +-
 .../x/sys/unix/syscall_darwin_386.go | 51 -
 .../x/sys/unix/syscall_darwin_arm.go | 51 -
 .../x/sys/unix/syscall_darwin_libSystem.go | 9 +-
 .../x/sys/unix/syscall_dragonfly.go | 21 +-
 .../golang.org/x/sys/unix/syscall_freebsd.go | 342 +-
 .../x/sys/unix/syscall_freebsd_386.go | 4 +-
 .../x/sys/unix/syscall_freebsd_amd64.go | 4 +-
 .../x/sys/unix/syscall_freebsd_arm.go | 2 +-
 .../x/sys/unix/syscall_freebsd_arm64.go | 2 +-
 .../x/sys/unix/syscall_freebsd_riscv64.go | 63 +
 .../golang.org/x/sys/unix/syscall_illumos.go | 13 +-
 vendor/golang.org/x/sys/unix/syscall_linux.go | 476 +-
 .../x/sys/unix/syscall_linux_386.go | 54 +-
 .../x/sys/unix/syscall_linux_alarm.go | 14 +
 .../x/sys/unix/syscall_linux_amd64.go | 54 +-
 .../x/sys/unix/syscall_linux_arm.go | 51 +-
 .../x/sys/unix/syscall_linux_arm64.go | 61 +-
 .../x/sys/unix/syscall_linux_loong64.go | 226 +
 .../x/sys/unix/syscall_linux_mips64x.go | 44 +-
 .../x/sys/unix/syscall_linux_mipsx.go | 44 +-
 .../x/sys/unix/syscall_linux_ppc.go | 50 +-
 .../x/sys/unix/syscall_linux_ppc64x.go | 43 +-
 .../x/sys/unix/syscall_linux_riscv64.go | 57 +-
 .../x/sys/unix/syscall_linux_s390x.go | 53 +-
 .../x/sys/unix/syscall_linux_sparc64.go | 42 +-
 .../golang.org/x/sys/unix/syscall_netbsd.go | 23 +-
 .../golang.org/x/sys/unix/syscall_openbsd.go | 17 +-
 .../x/sys/unix/syscall_openbsd_mips64.go | 4 +
 .../golang.org/x/sys/unix/syscall_solaris.go | 383 +-
 vendor/golang.org/x/sys/unix/syscall_unix.go | 125 +
 .../x/sys/unix/syscall_zos_s390x.go | 74 +-
 vendor/golang.org/x/sys/unix/sysvshm_linux.go | 21 +
 vendor/golang.org/x/sys/unix/sysvshm_unix.go | 61 +
 .../x/sys/unix/sysvshm_unix_other.go | 14 +
 .../x/sys/unix/zerrors_darwin_386.go | 1789 --------
 .../x/sys/unix/zerrors_darwin_amd64.go | 3120 ++++++-------
 .../x/sys/unix/zerrors_darwin_arm.go | 1789 --------
 .../x/sys/unix/zerrors_darwin_arm64.go | 3120 ++++++-------
 .../x/sys/unix/zerrors_freebsd_386.go | 114 +-
 .../x/sys/unix/zerrors_freebsd_amd64.go | 112 +-
 .../x/sys/unix/zerrors_freebsd_arm.go | 225 +-
 .../x/sys/unix/zerrors_freebsd_arm64.go | 105 +-
 .../x/sys/unix/zerrors_freebsd_riscv64.go | 2148 +++++++++
 vendor/golang.org/x/sys/unix/zerrors_linux.go | 663 ++-
 .../x/sys/unix/zerrors_linux_386.go | 29 +-
 .../x/sys/unix/zerrors_linux_amd64.go | 29 +-
 .../x/sys/unix/zerrors_linux_arm.go | 29 +-
 .../x/sys/unix/zerrors_linux_arm64.go | 30 +-
 .../x/sys/unix/zerrors_linux_loong64.go | 818 ++++
 .../x/sys/unix/zerrors_linux_mips.go | 29 +-
 .../x/sys/unix/zerrors_linux_mips64.go | 29 +-
 .../x/sys/unix/zerrors_linux_mips64le.go | 29 +-
 .../x/sys/unix/zerrors_linux_mipsle.go | 29 +-
 .../x/sys/unix/zerrors_linux_ppc.go | 29 +-
 .../x/sys/unix/zerrors_linux_ppc64.go | 29 +-
 .../x/sys/unix/zerrors_linux_ppc64le.go | 29 +-
 .../x/sys/unix/zerrors_linux_riscv64.go | 29 +-
 .../x/sys/unix/zerrors_linux_s390x.go | 29 +-
 .../x/sys/unix/zerrors_linux_sparc64.go | 29 +-
 .../x/sys/unix/zerrors_openbsd_386.go | 3 +
 .../x/sys/unix/zerrors_openbsd_arm.go | 3 +
 .../x/sys/unix/zerrors_zos_s390x.go | 22 +
 .../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 26 +-
 .../x/sys/unix/zsyscall_aix_ppc64.go | 24 +-
 .../x/sys/unix/zsyscall_aix_ppc64_gc.go | 20 +-
 .../x/sys/unix/zsyscall_aix_ppc64_gccgo.go | 18 +-
 .../x/sys/unix/zsyscall_darwin_386.1_13.go | 40 -
 .../x/sys/unix/zsyscall_darwin_386.1_13.s | 13 -
 .../x/sys/unix/zsyscall_darwin_386.go | 2431 ----------
 .../x/sys/unix/zsyscall_darwin_386.s | 291 --
 .../x/sys/unix/zsyscall_darwin_amd64.1_13.go | 8 +-
 .../x/sys/unix/zsyscall_darwin_amd64.1_13.s | 18 +-
 .../x/sys/unix/zsyscall_darwin_amd64.go | 668 +--
 .../x/sys/unix/zsyscall_darwin_amd64.s | 884 +++-
 .../x/sys/unix/zsyscall_darwin_arm.1_13.go | 40 -
 .../x/sys/unix/zsyscall_darwin_arm.1_13.s | 13 -
 .../x/sys/unix/zsyscall_darwin_arm.go | 2417 ----------
 .../x/sys/unix/zsyscall_darwin_arm.s | 289 --
 .../x/sys/unix/zsyscall_darwin_arm64.1_13.go | 8 +-
 .../x/sys/unix/zsyscall_darwin_arm64.1_13.s | 18 +-
 .../x/sys/unix/zsyscall_darwin_arm64.go | 668 +--
 .../x/sys/unix/zsyscall_darwin_arm64.s | 884 +++-
 .../x/sys/unix/zsyscall_freebsd_386.go | 145 +-
 .../x/sys/unix/zsyscall_freebsd_amd64.go | 143 +-
 .../x/sys/unix/zsyscall_freebsd_arm.go | 177 +-
 .../x/sys/unix/zsyscall_freebsd_arm64.go | 143 +-
 .../x/sys/unix/zsyscall_freebsd_riscv64.go | 1889 ++++++++
 .../golang.org/x/sys/unix/zsyscall_linux.go | 217 +-
 .../x/sys/unix/zsyscall_linux_386.go | 54 +-
 .../x/sys/unix/zsyscall_linux_amd64.go | 88 +-
 .../x/sys/unix/zsyscall_linux_arm.go | 68 +-
 .../x/sys/unix/zsyscall_linux_arm64.go | 26 +-
 .../x/sys/unix/zsyscall_linux_loong64.go | 527 +++
 .../x/sys/unix/zsyscall_linux_mips.go | 67 +-
 .../x/sys/unix/zsyscall_linux_mips64.go | 44 +-
 .../x/sys/unix/zsyscall_linux_mips64le.go | 47 +-
 .../x/sys/unix/zsyscall_linux_mipsle.go | 67 +-
 .../x/sys/unix/zsyscall_linux_ppc.go | 81 +-
 .../x/sys/unix/zsyscall_linux_ppc64.go | 81 +-
 .../x/sys/unix/zsyscall_linux_ppc64le.go | 81 +-
 .../x/sys/unix/zsyscall_linux_riscv64.go | 26 +-
 .../x/sys/unix/zsyscall_linux_s390x.go | 58 +-
 .../x/sys/unix/zsyscall_linux_sparc64.go | 54 +-
 .../x/sys/unix/zsyscall_netbsd_386.go | 16 +-
 .../x/sys/unix/zsyscall_netbsd_amd64.go | 16 +-
 .../x/sys/unix/zsyscall_netbsd_arm.go | 16 +-
 .../x/sys/unix/zsyscall_netbsd_arm64.go | 16 +-
 .../x/sys/unix/zsyscall_openbsd_386.go | 4 +-
 .../x/sys/unix/zsyscall_openbsd_amd64.go | 4 +-
 .../x/sys/unix/zsyscall_openbsd_arm.go | 4 +-
 .../x/sys/unix/zsyscall_openbsd_arm64.go | 4 +-
 .../x/sys/unix/zsyscall_openbsd_mips64.go | 4 +-
 .../x/sys/unix/zsyscall_solaris_amd64.go | 102 +-
 .../x/sys/unix/zsyscall_zos_s390x.go | 42 +-
 .../x/sys/unix/zsysnum_darwin_386.go | 438 --
 .../x/sys/unix/zsysnum_darwin_arm.go | 438 --
 .../x/sys/unix/zsysnum_freebsd_386.go | 107 +-
 .../x/sys/unix/zsysnum_freebsd_amd64.go | 107 +-
 .../x/sys/unix/zsysnum_freebsd_arm.go | 107 +-
 .../x/sys/unix/zsysnum_freebsd_arm64.go | 107 +-
 .../x/sys/unix/zsysnum_freebsd_riscv64.go | 394 ++
 .../x/sys/unix/zsysnum_linux_386.go | 8 +
 .../x/sys/unix/zsysnum_linux_amd64.go | 716 +--
 .../x/sys/unix/zsysnum_linux_arm.go | 8 +
 .../x/sys/unix/zsysnum_linux_arm64.go | 606 +--
 .../x/sys/unix/zsysnum_linux_loong64.go | 311 ++
 .../x/sys/unix/zsysnum_linux_mips.go | 7 +
 .../x/sys/unix/zsysnum_linux_mips64.go | 701 +--
 .../x/sys/unix/zsysnum_linux_mips64le.go | 701 +--
 .../x/sys/unix/zsysnum_linux_mipsle.go | 7 +
 .../x/sys/unix/zsysnum_linux_ppc.go | 7 +
 .../x/sys/unix/zsysnum_linux_ppc64.go | 799 ++--
 .../x/sys/unix/zsysnum_linux_ppc64le.go | 799 ++--
 .../x/sys/unix/zsysnum_linux_riscv64.go | 604 +--
 .../x/sys/unix/zsysnum_linux_s390x.go | 729 +--
 .../x/sys/unix/zsysnum_linux_sparc64.go | 757 +--
 .../x/sys/unix/ztypes_darwin_386.go | 524 ---
 .../x/sys/unix/ztypes_darwin_amd64.go | 294 +-
 .../x/sys/unix/ztypes_darwin_arm.go | 524 ---
 .../x/sys/unix/ztypes_darwin_arm64.go | 294 +-
 .../x/sys/unix/ztypes_dragonfly_amd64.go | 3 +
 .../x/sys/unix/ztypes_freebsd_386.go | 104 +-
 .../x/sys/unix/ztypes_freebsd_amd64.go | 101 +-
 .../x/sys/unix/ztypes_freebsd_arm.go | 150 +-
 .../x/sys/unix/ztypes_freebsd_arm64.go | 99 +-
 .../x/sys/unix/ztypes_freebsd_riscv64.go | 626 +++
 .../x/sys/unix/ztypes_illumos_amd64.go | 2 +
 vendor/golang.org/x/sys/unix/ztypes_linux.go | 1885 +++++++-
 .../golang.org/x/sys/unix/ztypes_linux_386.go | 78 +-
 .../x/sys/unix/ztypes_linux_amd64.go | 75 +-
 .../golang.org/x/sys/unix/ztypes_linux_arm.go | 78 +-
 .../x/sys/unix/ztypes_linux_arm64.go | 75 +-
 .../x/sys/unix/ztypes_linux_loong64.go | 685 +++
 .../x/sys/unix/ztypes_linux_mips.go | 77 +-
 .../x/sys/unix/ztypes_linux_mips64.go | 75 +-
 .../x/sys/unix/ztypes_linux_mips64le.go | 75 +-
 .../x/sys/unix/ztypes_linux_mipsle.go | 77 +-
 .../golang.org/x/sys/unix/ztypes_linux_ppc.go | 79 +-
 .../x/sys/unix/ztypes_linux_ppc64.go | 74 +-
 .../x/sys/unix/ztypes_linux_ppc64le.go | 74 +-
 .../x/sys/unix/ztypes_linux_riscv64.go | 75 +-
 .../x/sys/unix/ztypes_linux_s390x.go | 78 +-
 .../x/sys/unix/ztypes_linux_sparc64.go | 74 +-
 .../x/sys/unix/ztypes_netbsd_386.go | 4 +-
 .../x/sys/unix/ztypes_netbsd_amd64.go | 4 +-
 .../x/sys/unix/ztypes_netbsd_arm.go | 4 +-
 .../x/sys/unix/ztypes_netbsd_arm64.go | 4 +-
 .../x/sys/unix/ztypes_openbsd_386.go | 23 +-
 .../x/sys/unix/ztypes_openbsd_amd64.go | 23 +-
 .../x/sys/unix/ztypes_openbsd_arm.go | 23 +-
 .../x/sys/unix/ztypes_openbsd_arm64.go | 23 +-
 .../x/sys/unix/ztypes_openbsd_mips64.go | 23 +-
 .../x/sys/unix/ztypes_solaris_amd64.go | 42 +-
 .../golang.org/x/sys/unix/ztypes_zos_s390x.go | 4 +
 vendor/golang.org/x/sys/windows/aliases.go | 4 +-
 vendor/golang.org/x/sys/windows/empty.s | 1 +
 vendor/golang.org/x/sys/windows/eventlog.go | 1 +
 .../golang.org/x/sys/windows/exec_windows.go | 74 +-
 .../x/sys/windows/memory_windows.go | 11 +
 vendor/golang.org/x/sys/windows/mksyscall.go | 3 +-
 vendor/golang.org/x/sys/windows/race.go | 1 +
 vendor/golang.org/x/sys/windows/race0.go | 1 +
 .../x/sys/windows/security_windows.go | 1 +
 vendor/golang.org/x/sys/windows/service.go | 14 +-
 .../x/sys/windows/setupapi_windows.go | 1425 ++++++
 .../x/sys/windows/setupapierrors_windows.go | 100 -
 vendor/golang.org/x/sys/windows/str.go | 1 +
 vendor/golang.org/x/sys/windows/syscall.go | 1 +
 .../x/sys/windows/syscall_windows.go | 94 +-
 .../golang.org/x/sys/windows/types_windows.go | 507 +-
 .../x/sys/windows/zsyscall_windows.go | 570 ++-
 vendor/google.golang.org/grpc/.travis.yml | 42 -
 vendor/google.golang.org/grpc/Makefile | 30 +-
 vendor/google.golang.org/grpc/README.md | 2 +-
 vendor/google.golang.org/grpc/SECURITY.md | 3 +
 .../grpc/balancer/balancer.go | 14 +
 .../grpc/balancer/base/balancer.go | 53 +-
 .../grpc/balancer_conn_wrappers.go | 67 +-
 .../grpc_binarylog_v1/binarylog.pb.go | 40 +-
 vendor/google.golang.org/grpc/clientconn.go | 132 +-
 .../grpc/credentials/credentials.go | 43 +-
 vendor/google.golang.org/grpc/dialoptions.go | 22 +-
 .../grpc/encoding/proto/proto.go | 70 +-
 vendor/google.golang.org/grpc/go.mod | 13 +-
 vendor/google.golang.org/grpc/go.sum | 64 +-
 vendor/google.golang.org/grpc/interceptor.go | 36 +-
 .../grpc/internal/binarylog/sink.go | 41 +-
 .../grpc/internal/credentials/credentials.go | 49 +
 .../grpc/internal/grpcrand/grpcrand.go | 29 +-
 .../grpc/internal/grpcutil/target.go | 42 +-
 .../grpc/internal/internal.go | 19 +-
 .../grpc/internal/metadata/metadata.go | 50 +
 .../grpc/internal/resolver/config_selector.go | 164 +
 .../internal/resolver/dns/dns_resolver.go | 43 +-
 .../grpc/internal/resolver/unix/unix.go | 63 +
 .../internal/serviceconfig/serviceconfig.go | 72 +
 .../grpc/internal/status/status.go | 14 +-
 .../grpc/internal/syscall/syscall_linux.go | 20 +-
 .../grpc/internal/syscall/syscall_nonlinux.go | 2 +-
 .../grpc/internal/transport/controlbuf.go | 56 +-
 .../grpc/internal/transport/handler_server.go | 3 +-
 .../grpc/internal/transport/http2_client.go | 349 +-
 .../grpc/internal/transport/http2_server.go | 207 +-
 .../grpc/internal/transport/http_util.go | 250 +-
 .../transport/networktype/networktype.go | 46 +
 .../grpc/{ => internal/transport}/proxy.go | 52 +-
 .../grpc/internal/transport/transport.go | 20 +-
 .../grpc/internal/xds_handshake_cluster.go | 40 +
 .../grpc/metadata/metadata.go | 100 +-
 .../google.golang.org/grpc/picker_wrapper.go | 2 +-
 vendor/google.golang.org/grpc/pickfirst.go | 2 +-
 vendor/google.golang.org/grpc/regenerate.sh | 42 +-
 .../grpc/resolver/resolver.go | 2 +-
 .../grpc/resolver_conn_wrapper.go | 71 +-
 vendor/google.golang.org/grpc/rpc_util.go | 88 +-
 vendor/google.golang.org/grpc/server.go | 197 +-
 .../google.golang.org/grpc/service_config.go | 74 +-
 vendor/google.golang.org/grpc/stats/stats.go | 4 +
 .../google.golang.org/grpc/status/status.go | 8 +-
 vendor/google.golang.org/grpc/stream.go | 138 +-
 vendor/google.golang.org/grpc/tap/tap.go | 16 +-
 vendor/google.golang.org/grpc/version.go | 2 +-
 vendor/google.golang.org/grpc/vet.sh | 75 +-
 .../protobuf/encoding/prototext/decode.go | 3 -
 .../protobuf/internal/encoding/text/encode.go | 5 +
 .../protobuf/internal/impl/legacy_message.go | 7 +
 .../protobuf/internal/version/version.go | 4 +-
 .../reflect/protoregistry/registry.go | 43 +-
 .../types/descriptorpb/descriptor.pb.go | 82 -
 vendor/gopkg.in/yaml.v3/.travis.yml | 16 -
 vendor/gopkg.in/yaml.v3/apic.go | 1 +
 vendor/gopkg.in/yaml.v3/decode.go | 143 +-
 vendor/gopkg.in/yaml.v3/emitterc.go | 58 +-
 vendor/gopkg.in/yaml.v3/encode.go | 30 +-
 vendor/gopkg.in/yaml.v3/parserc.go | 57 +-
 vendor/gopkg.in/yaml.v3/scannerc.go | 49 +-
 vendor/gopkg.in/yaml.v3/yaml.go | 40 +-
 vendor/gopkg.in/yaml.v3/yamlh.go | 2 +
 vendor/modules.txt | 80 +-
 945 files changed, 59695 insertions(+), 40194 deletions(-)
 delete mode 100644 vendor/github.com/BurntSushi/toml/.travis.yml
 delete mode 100644 vendor/github.com/BurntSushi/toml/COMPATIBLE
 delete mode 100644 vendor/github.com/BurntSushi/toml/Makefile
 create mode 100644 vendor/github.com/BurntSushi/toml/decode_go116.go
 create mode 100644 vendor/github.com/BurntSushi/toml/deprecated.go
 delete mode 100644 vendor/github.com/BurntSushi/toml/encoding_types.go
 delete mode 100644 vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
 create mode 100644 vendor/github.com/BurntSushi/toml/error.go
 create mode 100644 vendor/github.com/BurntSushi/toml/go.mod
 create mode 100644 vendor/github.com/BurntSushi/toml/internal/tz.go
 rename vendor/github.com/BurntSushi/toml/{decode_meta.go => meta.go} (60%)
 delete mode 100644 vendor/github.com/BurntSushi/toml/session.vim
 rename vendor/github.com/BurntSushi/toml/{type_check.go => type_toml.go} (75%)
 create mode 100644 vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go
 create mode 100644 vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go
 create mode 100644 vendor/github.com/Microsoft/hcsshim/.golangci.yml
 create mode 100644 vendor/github.com/Microsoft/hcsshim/Makefile
 create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_container_definition_device.go
 create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_category.go
 create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_extension.go
 create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_instance.go
 create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_namespace.go
 create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_interface_class.go
 create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_namespace.go
 create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_directory.go
 create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_namespace.go
 create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_symlink.go
 create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_mapping.go
 create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go
 create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go
 create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go
 create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go
 create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go
 delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go
 delete mode 100644 vendor/github.com/containers/storage/Vagrantfile
 create mode 100644 vendor/github.com/containers/storage/deprecated.go
 delete mode 100644 vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go
 create mode 100644 vendor/github.com/containers/storage/drivers/chown_darwin.go
 create mode 100644 vendor/github.com/containers/storage/drivers/driver_darwin.go
 create mode 100644 vendor/github.com/containers/storage/drivers/overlay/check_116.go
 create mode 100644 vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go
 create mode 100644 vendor/github.com/containers/storage/pkg/archive/archive_bsd.go
 delete mode 100644 vendor/github.com/containers/storage/pkg/archive/archive_freebsd.go
 create mode 100644 vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go
 create mode 100644 vendor/github.com/containers/storage/pkg/archive/fflags_unsupported.go
 create mode 100644 vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go
 create mode 100644 vendor/github.com/containers/storage/pkg/chrootarchive/diff_darwin.go
 create mode 100644 vendor/github.com/containers/storage/pkg/chrootarchive/init_darwin.go
 create mode 100644 vendor/github.com/containers/storage/pkg/homedir/homedir.go
 create mode 100644 vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go
 create mode 100644 vendor/github.com/containers/storage/pkg/idtools/idtools_unsupported.go
 create mode 100644 vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go
 create mode 100644 vendor/github.com/containers/storage/pkg/parsers/kernel/uname_freebsd.go
 create mode 100644 vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported_type.go
 create mode 100644 vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go
 create mode 100644 vendor/github.com/containers/storage/pkg/regexp/regexp.go
 create mode 100644 vendor/github.com/containers/storage/pkg/regexp/regexp_dontprecompile.go
 create mode 100644 vendor/github.com/containers/storage/pkg/regexp/regexp_precompile.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/lchflags_bsd.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/rm_common.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/rm_freebsd.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/stat_common.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go
 create mode 100644 vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go
 create mode 100644 vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.c
 create mode 100644 vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go
 create mode 100644 vendor/github.com/containers/storage/storage.conf-freebsd
 create mode 100644 vendor/github.com/containers/storage/types/default_override_test.conf
 create mode 100644 vendor/github.com/containers/storage/types/options_darwin.go
 create mode 100644 vendor/github.com/containers/storage/types/options_freebsd.go
 create mode 100644 vendor/github.com/containers/storage/types/options_linux.go
 create mode 100644 vendor/github.com/containers/storage/types/options_windows.go
 create mode 100644 vendor/github.com/containers/storage/types/storage_broken.conf
 create mode 100644 vendor/github.com/cyphar/filepath-securejoin/go.mod
 delete mode 100644 vendor/github.com/cyphar/filepath-securejoin/vendor.conf
 rename vendor/github.com/godbus/dbus/v5/{README.markdown => README.md} (81%)
 create mode 100644 vendor/github.com/godbus/dbus/v5/transport_unixcred_netbsd.go
 delete mode 100644 vendor/github.com/golang/snappy/.gitignore
 delete mode 100644 vendor/github.com/golang/snappy/AUTHORS
 delete mode 100644 vendor/github.com/golang/snappy/CONTRIBUTORS
 delete mode 100644 vendor/github.com/golang/snappy/README
 delete mode 100644 vendor/github.com/golang/snappy/decode_amd64.s
 delete mode 100644 vendor/github.com/golang/snappy/decode_arm64.s
 delete mode 100644 vendor/github.com/golang/snappy/decode_asm.go
 delete mode 100644 vendor/github.com/golang/snappy/encode_amd64.s
 delete mode 100644 vendor/github.com/golang/snappy/encode_arm64.s
 delete mode 100644 vendor/github.com/golang/snappy/encode_asm.go
 delete mode 100644 vendor/github.com/golang/snappy/go.mod
 create mode 100644 vendor/github.com/klauspost/compress/.gitattributes
 rename vendor/github.com/{willf/bitset => klauspost/compress}/.gitignore (74%)
 create mode 100644 vendor/github.com/klauspost/compress/.goreleaser.yml
 create mode 100644 vendor/github.com/klauspost/compress/README.md
 create mode 100644 vendor/github.com/klauspost/compress/compressible.go
 create mode 100644 vendor/github.com/klauspost/compress/gen.sh
 create mode 100644 vendor/github.com/klauspost/compress/go.mod
 rename vendor/github.com/{willf/bitset => klauspost/compress}/go.sum (100%)
 create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
 create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
 create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_generic.go
 create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go
 create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go
 create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s
 rename vendor/github.com/{golang/snappy => klauspost/compress/internal/snapref}/LICENSE (100%)
 rename vendor/github.com/{golang/snappy => klauspost/compress/internal/snapref}/decode.go (88%)
 rename vendor/github.com/{golang/snappy => klauspost/compress/internal/snapref}/decode_other.go (98%)
 rename vendor/github.com/{golang/snappy => klauspost/compress/internal/snapref}/encode.go (99%)
 rename vendor/github.com/{golang/snappy => klauspost/compress/internal/snapref}/encode_other.go (98%)
 rename vendor/github.com/{golang/snappy => klauspost/compress/internal/snapref}/snappy.go (96%)
 create mode 100644 vendor/github.com/klauspost/compress/s2sx.mod
 create mode 100644 vendor/github.com/klauspost/compress/s2sx.sum
 create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s
 create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
 rename vendor/github.com/klauspost/compress/zstd/internal/xxhash/{xxhash_amd64.go => xxhash_asm.go} (54%)
 create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
 create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_generic.go
 delete mode 100644 vendor/github.com/mistifyio/go-zfs/.gitignore
 delete mode 100644 vendor/github.com/mistifyio/go-zfs/.travis.yml
 delete mode 100644 vendor/github.com/mistifyio/go-zfs/Vagrantfile
 delete mode 100644 vendor/github.com/mistifyio/go-zfs/utils_notsolaris.go
 delete mode 100644 vendor/github.com/mistifyio/go-zfs/utils_solaris.go
 create mode 100644 vendor/github.com/mistifyio/go-zfs/v3/.envrc
 create mode 100644 vendor/github.com/mistifyio/go-zfs/v3/.gitignore
 create mode 100644 vendor/github.com/mistifyio/go-zfs/v3/.golangci.yml
 create mode 100644 vendor/github.com/mistifyio/go-zfs/v3/.yamllint
 create mode 100644 vendor/github.com/mistifyio/go-zfs/v3/CHANGELOG.md
 rename vendor/github.com/mistifyio/go-zfs/{ => v3}/CONTRIBUTING.md (54%)
 rename vendor/github.com/mistifyio/go-zfs/{ => v3}/LICENSE (100%)
 create mode 100644 vendor/github.com/mistifyio/go-zfs/v3/Makefile
 rename vendor/github.com/mistifyio/go-zfs/{ => v3}/README.md (87%)
 create mode 100644 vendor/github.com/mistifyio/go-zfs/v3/Vagrantfile
 rename vendor/github.com/mistifyio/go-zfs/{ => v3}/error.go (100%)
 create mode 100644 vendor/github.com/mistifyio/go-zfs/v3/go.mod
 create mode 100644 vendor/github.com/mistifyio/go-zfs/v3/go.sum
 create mode 100644 vendor/github.com/mistifyio/go-zfs/v3/lint.mk
 create mode 100644 vendor/github.com/mistifyio/go-zfs/v3/rules.mk
 create mode 100644 vendor/github.com/mistifyio/go-zfs/v3/shell.nix
 rename vendor/github.com/mistifyio/go-zfs/{ => v3}/utils.go (74%)
 create mode 100644 vendor/github.com/mistifyio/go-zfs/v3/utils_notsolaris.go
 create mode 100644 vendor/github.com/mistifyio/go-zfs/v3/utils_solaris.go
 rename vendor/github.com/mistifyio/go-zfs/{ => v3}/zfs.go (70%)
 rename vendor/github.com/mistifyio/go-zfs/{ => v3}/zpool.go (71%)
 create mode 100644 vendor/github.com/moby/sys/mountinfo/mountinfo_freebsdlike.go
 create mode 100644 vendor/github.com/moby/sys/mountinfo/mountinfo_openbsd.go
 create mode 100644 vendor/github.com/modern-go/reflect2/go.mod
 create mode 100644 vendor/github.com/modern-go/reflect2/go_above_118.go
 delete mode 100644 vendor/github.com/modern-go/reflect2/go_above_17.go
 create mode 100644 vendor/github.com/modern-go/reflect2/go_below_118.go
 delete mode 100644 vendor/github.com/modern-go/reflect2/go_below_17.go
 delete mode 100644 vendor/github.com/modern-go/reflect2/go_below_19.go
 delete mode 100644 vendor/github.com/modern-go/reflect2/test.sh
 create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor.go
 delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups_unsupported.go
 create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/file.go
 delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/fscommon.go
 delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/open.go
 delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/utils.go
 create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/getallpids.go
 delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/devices.go
 create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/rdma.go
 delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/system/linux.go
 delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/system/proc.go
 delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_32.go
 delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go
 delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/system/userns_deprecated.go
 delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go
 create mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/rchcon.go
 create mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/rchcon_go115.go
 create mode 100644 vendor/github.com/opencontainers/selinux/pkg/pwalkdir/README.md
 create mode 100644 vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go
 create mode 100644 vendor/github.com/seccomp/libseccomp-golang/.golangci.yml
 delete mode 100644 vendor/github.com/seccomp/libseccomp-golang/.travis.yml
 create mode 100644 vendor/github.com/seccomp/libseccomp-golang/SECURITY.md
 create mode 100644 vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go
 create mode 100644 vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go
 delete mode 100644 vendor/github.com/willf/bitset/.travis.yml
 delete mode 100644 vendor/github.com/willf/bitset/LICENSE
 delete mode 100644 vendor/github.com/willf/bitset/README.md
 delete mode 100644 vendor/github.com/willf/bitset/azure-pipelines.yml
 delete mode 100644 vendor/github.com/willf/bitset/bitset.go
 delete mode 100644 vendor/github.com/willf/bitset/go.mod
 delete mode 100644 vendor/github.com/willf/bitset/popcnt.go
 delete mode 100644 vendor/github.com/willf/bitset/popcnt_19.go
 delete mode 100644 vendor/github.com/willf/bitset/popcnt_amd64.go
 delete mode 100644 vendor/github.com/willf/bitset/popcnt_amd64.s
 delete mode 100644 vendor/github.com/willf/bitset/popcnt_generic.go
 delete mode 100644 vendor/github.com/willf/bitset/trailing_zeros_18.go
 delete mode 100644 vendor/github.com/willf/bitset/trailing_zeros_19.go
 create mode 100644 vendor/go.etcd.io/bbolt/compact.go
 create mode 100644 vendor/go.etcd.io/bbolt/mlock_unix.go
 create mode 100644 vendor/go.etcd.io/bbolt/mlock_windows.go
 delete mode 100644 vendor/golang.org/x/net/http2/README
 create mode 100644 vendor/golang.org/x/net/http2/ascii.go
 create mode 100644 vendor/golang.org/x/net/http2/go115.go
 create mode 100644 vendor/golang.org/x/net/http2/not_go115.go
 create mode 100644 vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s
 create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_loong64.s
 delete mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_386.go
 delete mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_arm.go
 delete mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_arm64.go
 create mode 100644 vendor/golang.org/x/sys/unix/ifreq_linux.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_386.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_arm.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_alarm.go
 create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_loong64.go
 create mode 100644 vendor/golang.org/x/sys/unix/sysvshm_linux.go
 create mode 100644 vendor/golang.org/x/sys/unix/sysvshm_unix.go
 create mode 100644 vendor/golang.org/x/sys/unix/sysvshm_unix_other.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_386.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go
 create mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.s
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.s
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go
 create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_386.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go
 create mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go
 create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go
 create mode 100644 vendor/golang.org/x/sys/windows/setupapi_windows.go
 delete mode 100644 vendor/golang.org/x/sys/windows/setupapierrors_windows.go
 delete mode 100644 vendor/google.golang.org/grpc/.travis.yml
 create mode 100644 vendor/google.golang.org/grpc/SECURITY.md
 create mode 100644 vendor/google.golang.org/grpc/internal/credentials/credentials.go
 create mode 100644 vendor/google.golang.org/grpc/internal/metadata/metadata.go
 create mode 100644 vendor/google.golang.org/grpc/internal/resolver/config_selector.go
 create mode 100644 vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
 create mode 100644 vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go
 rename vendor/google.golang.org/grpc/{ => internal/transport}/proxy.go (73%)
 create mode 100644 vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go
 delete mode 100644 vendor/gopkg.in/yaml.v3/.travis.yml

diff --git a/go.mod b/go.mod
index 9dea298d4..89507430c 100644
--- a/go.mod
+++ b/go.mod
@@ -3,14 +3,14 @@ module github.com/containers/common
 go 1.15
 
 require (
-	github.com/BurntSushi/toml v0.3.1
+	github.com/BurntSushi/toml v1.2.1
 	github.com/containers/image/v5 v5.12.0
 	github.com/containers/ocicrypt v1.1.1
-	github.com/containers/storage v1.32.1
+	github.com/containers/storage v1.45.3
 	github.com/disiqueira/gotree/v3 v3.0.2
 	github.com/docker/distribution v2.7.1+incompatible
 	github.com/docker/docker v20.10.7+incompatible
-	github.com/docker/go-units v0.4.0
+	github.com/docker/go-units v0.5.0
 	github.com/ghodss/yaml v1.0.0
 	github.com/gorilla/mux v1.8.0 // indirect
 	github.com/hashicorp/go-multierror v1.1.1
@@ -20,18 +20,18 @@ require (
 	github.com/onsi/gomega v1.13.0
 	github.com/opencontainers/go-digest v1.0.0
 	github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6
-	github.com/opencontainers/runc v1.0.0-rc95
+	github.com/opencontainers/runc v1.1.4
 	github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
 	github.com/opencontainers/runtime-tools v0.9.0
-	github.com/opencontainers/selinux v1.8.1
+	github.com/opencontainers/selinux v1.10.2
 	github.com/pkg/errors v0.9.1
-	github.com/seccomp/libseccomp-golang v0.9.2-0.20200616122406-847368b35ebf
-	github.com/sirupsen/logrus v1.8.1
+	github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646
+	github.com/sirupsen/logrus v1.9.0
 	github.com/spf13/cobra v1.1.3
 	github.com/spf13/pflag v1.0.5
-	github.com/stretchr/testify v1.7.0
+	github.com/stretchr/testify v1.8.1
 	github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
-	golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
-	golang.org/x/sys v0.0.0-20210426230700-d19ff857e887
+	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
+	golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8
 	golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1
 )
diff --git a/go.sum b/go.sum
index 039b219f0..97af6b9ba 100644
--- a/go.sum
+++ b/go.sum
@@ -10,6 +10,7 @@ cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6T
 cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
 cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
 cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
 cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
 cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
 cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
@@ -38,8 +39,9 @@ github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935
 github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
 github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
 github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
-github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
+github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
 github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
@@ -49,8 +51,8 @@ github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugX
 github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
 github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
 github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/go-winio v0.5.0 h1:Elr9Wn+sGKPlkaBvwu4mTrxtmOp3F3yV9qhaHbXGjwU=
-github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA=
+github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
 github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
 github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
 github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
@@ -58,13 +60,16 @@ github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg3
 github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
 github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
 github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
-github.com/Microsoft/hcsshim v0.8.17 h1:yFHH5bghP9ij5Y34PPaMOE8g//oXZ0uJQeMENVo2zcI=
-github.com/Microsoft/hcsshim v0.8.17/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
+github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
+github.com/Microsoft/hcsshim v0.9.6 h1:VwnDOgLeoi2du6dAznfmspNqTiwczvjv4K7NxuY9jsY=
+github.com/Microsoft/hcsshim v0.9.6/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
 github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
 github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
 github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
 github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
 github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
 github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
 github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM=
@@ -76,6 +81,7 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
 github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
 github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
@@ -89,6 +95,7 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
 github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
+github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
 github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
 github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
 github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
@@ -99,6 +106,7 @@ github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7
 github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
 github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
 github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
+github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
@@ -106,6 +114,7 @@ github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
 github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
+github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
 github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8=
@@ -116,9 +125,12 @@ github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmE
 github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
 github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
 github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
-github.com/cilium/ebpf v0.5.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
 github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
 github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
@@ -140,6 +152,7 @@ github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on
 github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
 github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
 github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
+github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
 github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
 github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
 github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
@@ -152,8 +165,9 @@ github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7
 github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
 github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
 github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
-github.com/containerd/containerd v1.5.1 h1:xWHPAoe6VkUiI9GAvndJM7s/0MTrmwX3AQiYTr3olf0=
 github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
+github.com/containerd/containerd v1.5.7 h1:rQyoYtj4KddB3bxG6SAqd4+08gePNyJjRqvOIfV3rkM=
+github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c=
 github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
 github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
 github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
@@ -181,11 +195,14 @@ github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJ
 github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
 github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
 github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
+github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
+github.com/containerd/stargz-snapshotter/estargz v0.13.0/go.mod h1:m+9VaGJGlhCnrcEUod8mYumTmRgblwd3rC5UCEh2Yp0=
 github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
 github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
 github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
 github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
 github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
+github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ=
 github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
 github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
 github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
@@ -209,8 +226,8 @@ github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgU
 github.com/containers/ocicrypt v1.1.1 h1:prL8l9w3ntVqXvNH1CiNn5ENjcCnr38JqpSyvKKB4GI=
 github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
 github.com/containers/storage v1.30.1/go.mod h1:NDJkiwxnSHD1Is+4DGcyR3SIEYSDOa0xnAW+uGQFx9E=
-github.com/containers/storage v1.32.1 h1:JgvHY5dokiff+Ee4TdvPYO++Oq2BAave5DmyPetH2iU=
-github.com/containers/storage v1.32.1/go.mod h1:do6oIF71kfkVS3CPUZr+6He94fIaj6pzF8ywevPuuOw=
+github.com/containers/storage v1.45.3 h1:GbtTvTtp3GW2/tcFg5VhgHXcYMwVn2KfZKiHjf9FAOM=
+github.com/containers/storage v1.45.3/go.mod h1:OdRUYHrq1HP6iAo79VxqtYuJzC5j4eA2I60jKOoCT7g=
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@@ -225,17 +242,19 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9
 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
 github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
-github.com/coreos/go-systemd/v22 v22.3.1 h1:7OO2CXWMYNDdaAzP51t4lCCZWwpQHmvPbm9sxWjm3So=
-github.com/coreos/go-systemd/v22 v22.3.1/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
+github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
 github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
 github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
 github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
 github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg=
 github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
+github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI=
+github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
 github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
 github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
 github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
@@ -250,10 +269,12 @@ github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8
 github.com/disiqueira/gotree/v3 v3.0.2 h1:ik5iuLQQoufZBNPY518dXhiO5056hyNBIK9lWhkNRq8=
 github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEgPLDY8Qk+uUu8=
 github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
+github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
 github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
 github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
 github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v1.4.2-0.20191219165747-a9416c67da9f/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v20.10.7+incompatible h1:Z6O9Nhsjv+ayUEeI1IojKbYcsGdgYSNqxe1s2MYzUhQ=
 github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
@@ -266,8 +287,9 @@ github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6Uezg
 github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
 github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
 github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
-github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
 github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
+github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
 github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
 github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4=
 github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
@@ -281,6 +303,8 @@ github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT
 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
@@ -304,11 +328,15 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
 github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
 github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
 github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
 github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
 github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
 github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
+github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
 github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
+github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
 github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
 github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
@@ -318,8 +346,9 @@ github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblf
 github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e h1:BWhy2j3IXJhjCbC68FptL43tDKIq8FladmaTs3Xs7Z8=
 github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
 github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA=
 github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.0.6 h1:mkgN1ofwASrYnJ5W6U/BxG15eXXXjirgZc7CLqkcaro=
+github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
 github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@@ -341,6 +370,8 @@ github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb
 github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
 github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
 github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -358,7 +389,6 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
 github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
 github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
 github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
 github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -370,8 +400,10 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
 github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM=
 github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -382,6 +414,7 @@ github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OI
 github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
 github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
 github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -394,6 +427,7 @@ github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3i
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
 github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
 github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
 github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
 github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
@@ -406,6 +440,7 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
 github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
 github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
 github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
 github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
 github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -436,8 +471,9 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:
 github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
 github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
 github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA=
 github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
+github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
 github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
 github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
@@ -445,12 +481,13 @@ github.com/jinzhu/copier v0.3.2 h1:QdBOCbaouLDYaIPFfi1bKv5F5tPpeTwXe4sD0jqtz5w=
 github.com/jinzhu/copier v0.3.2/go.mod h1:24xnZezI2Yqac9J61UC6/dG/k76ttpq0DdJI3QmUvro=
 github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
 github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8=
 github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
 github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
 github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ=
-github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
 github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
@@ -464,8 +501,9 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
 github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
-github.com/klauspost/compress v1.12.3 h1:G5AfA94pHPysR56qqrkO2pxEexdDzrpFJ6yt/VqWxVU=
-github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
+github.com/klauspost/compress v1.15.14 h1:i7WCKDToww0wA+9qrUZ1xOjp218vfFo3nTU6UHp+gOc=
+github.com/klauspost/compress v1.15.14/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
 github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
 github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -474,16 +512,18 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
 github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
 github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
 github.com/lunixbochs/vtclean v0.0.0-20180621232353-2d01aacdc34a h1:weJVJJRzAJBFRlAiJQROKQs8oC9vOxvm4rZmBBk0ONw=
 github.com/lunixbochs/vtclean v0.0.0-20180621232353-2d01aacdc34a/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
 github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
 github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
@@ -499,16 +539,21 @@ github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp
 github.com/mattn/go-runewidth v0.0.10 h1:CoZ3S2P7pvtP45xOtBw+/mDL2z0RKI576gSkzRRpdGg=
 github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
 github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
-github.com/mattn/go-shellwords v1.0.11 h1:vCoR9VPpsk/TZFW2JwK5I9S0xdrtUq2bph6/YjEPnaw=
+github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
 github.com/mattn/go-shellwords v1.0.11/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
+github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
+github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
 github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY=
 github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
 github.com/miekg/pkcs11 v1.0.3 h1:iMwmD7I5225wv84WxIG/bmxz9AXjWvTWIbM/TYHvWtw=
 github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
 github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible h1:aKW/4cBs+yK6gpqU3K/oIwk9Q/XICqd3zOX/UFuvqmk=
 github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
+github.com/mistifyio/go-zfs/v3 v3.0.0 h1:J5QK618xRcXnQYZ2GE5FdmpS1ufIrWue+lR/mpe6/14=
+github.com/mistifyio/go-zfs/v3 v3.0.0/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k=
 github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
 github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
@@ -520,8 +565,10 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh
 github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
 github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
 github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
-github.com/moby/sys/mountinfo v0.4.1 h1:1O+1cHA1aujwEwwVMa2Xm2l+gIpUHyd3+D+d7LZh1kM=
 github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
+github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78=
+github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
 github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
 github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
 github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 h1:rzf0wL0CHVc8CEsgyygG0Mn9CNCCPZqOPaz8RiiHYQk=
@@ -530,8 +577,9 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJ
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
 github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
 github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
 github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
 github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
@@ -542,6 +590,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m
 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
 github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
 github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
 github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
 github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
@@ -550,17 +600,21 @@ github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:v
 github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
 github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
 github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E=
 github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
 github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
 github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
 github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
 github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
 github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
 github.com/onsi/gomega v1.13.0 h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak=
@@ -580,8 +634,9 @@ github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59P
 github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
 github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
 github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
-github.com/opencontainers/runc v1.0.0-rc95 h1:RMuWVfY3E1ILlVsC3RhIq38n4sJtlOFwU9gfFZSqrd0=
-github.com/opencontainers/runc v1.0.0-rc95/go.mod h1:z+bZxa/+Tz/FmYVWkhUajJdzFeOqjc5vrqskhVyHGUM=
+github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
+github.com/opencontainers/runc v1.1.4 h1:nRCz/8sKg6K6jgYAFLDlXzPeITBZJyX28DBVhWD+5dg=
+github.com/opencontainers/runc v1.1.4/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
 github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
@@ -594,8 +649,10 @@ github.com/opencontainers/runtime-tools v0.9.0 h1:FYgwVsKRI/H9hU32MJ/4MLOzXWodKK
 github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
 github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
 github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
-github.com/opencontainers/selinux v1.8.1 h1:yvEZh7CsfnJNwKzG9ZeXwbvR05RAZsu5RS/3vA6qFTA=
-github.com/opencontainers/selinux v1.8.1/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
+github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
+github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
+github.com/opencontainers/selinux v1.10.2 h1:NFy2xCsjn7+WspbfZkUd5zyVeisV7VFbPSP96+8/ha4=
+github.com/opencontainers/selinux v1.10.2/go.mod h1:cARutUbaUrlRClyvxOICCgKixCs6L05aUsohzA3EkHQ=
 github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 h1:TnbXhKzrTOyuvWrjI8W6pcoI9XPbLHFXCdN2dtUw7Rw=
 github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc=
 github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
@@ -650,15 +707,17 @@ github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJ
 github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
 github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
 github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
 github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U=
 github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
 github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
-github.com/seccomp/libseccomp-golang v0.9.2-0.20200616122406-847368b35ebf h1:b0+ZBD3rohnkQ4q5duD1+RyTXTg9yk+qTOPMSQtapO0=
-github.com/seccomp/libseccomp-golang v0.9.2-0.20200616122406-847368b35ebf/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
+github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646 h1:RpforrEYXWkmGwJHIGnLZ3tTWStkjVVstwzNGqxX2Ds=
+github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
 github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
 github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
 github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
@@ -667,8 +726,9 @@ github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMB
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
 github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
 github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
+github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
 github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
@@ -697,14 +757,19 @@ github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
 github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
 github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
 github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
@@ -716,14 +781,17 @@ github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ
 github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
 github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
-github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8=
 github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
+github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8=
+github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
 github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
 github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
 github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
 github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE=
+github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
 github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g=
+github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME=
+github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI=
 github.com/vbauerster/mpb/v6 v6.0.3 h1:j+twHHhSUe8aXWaT/27E98G5cSBeqEuJSVCMjmLg0PI=
 github.com/vbauerster/mpb/v6 v6.0.3/go.mod h1:5luBx4rDLWxpA4t6I5sdeeQuZhqDxc+wr5Nqf35+tnM=
 github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
@@ -733,7 +801,6 @@ github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmF
 github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
 github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
 github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
-github.com/willf/bitset v1.1.11 h1:N7Z7E9UvjW+sGsEl7k/SJrvY2reP1A07MrGuCjIOjRE=
 github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
 github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
 github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b h1:6cLsL+2FW6dRAdl5iMtHgRogVCff0QpRi9653YmdcJA=
@@ -747,13 +814,15 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q
 github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
 github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
 github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
 github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
 go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
 go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
 go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
+go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
+go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
 go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
 go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 h1:A/5uWzF44DlIgdm/PQFwfMkW0JX+cIcQi/SwLAmZP5M=
 go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
@@ -762,6 +831,7 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
 go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8=
 go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
 go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
 go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
 go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
@@ -811,6 +881,7 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
 golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -830,6 +901,7 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR
 golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -841,15 +913,19 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL
 golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
 golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
 golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
 golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
 golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
+golang.org/x/net v0.0.0-20210825183410-e898025ed96a h1:bRuuGXV8wwSdGTB+CtJf+FjgO1APK1CoO39T4BN/XBw=
+golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -861,10 +937,12 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=
 golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -886,6 +964,7 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -912,6 +991,7 @@ golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys
v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -920,6 +1000,7 @@ golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -929,9 +1010,17 @@ golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210426230700-d19ff857e887 h1:dXfMednGJh/SUUFjTLsWJz3P+TQt9qnR11GgeI3vWKs= golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 h1:0A+M6Uqn+Eje4kHMK80dtF3JCXC4ykBgQG4Fe06QRhQ= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text 
v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -946,10 +1035,12 @@ golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -965,6 +1056,7 @@ golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -984,9 +1076,14 @@ golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= 
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1003,11 +1100,13 @@ google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1029,7 +1128,10 @@ google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4 google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a h1:pOwg4OoaRYScjmR4LlLgdtnyoHYTSAVhhqe5uPdpII8= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= @@ -1044,9 +1146,13 @@ google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQ google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= 
-google.golang.org/grpc v1.33.2 h1:EQyQC3sa8M+p6Ulc8yy9SWSS2GVwyRc83gAbG8lrl4o= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1058,15 +1164,17 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= @@ -1084,14 +1192,16 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 
v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= @@ -1115,6 +1225,7 @@ k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= +k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= @@ -1123,8 +1234,12 @@ k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= @@ -1133,6 +1248,7 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore index 0cd380037..fe79e3add 
100644 --- a/vendor/github.com/BurntSushi/toml/.gitignore +++ b/vendor/github.com/BurntSushi/toml/.gitignore @@ -1,5 +1,2 @@ -TAGS -tags -.*.swp -tomlcheck/tomlcheck -toml.test +/toml.test +/toml-test diff --git a/vendor/github.com/BurntSushi/toml/.travis.yml b/vendor/github.com/BurntSushi/toml/.travis.yml deleted file mode 100644 index 8b8afc4f0..000000000 --- a/vendor/github.com/BurntSushi/toml/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -language: go -go: - - 1.1 - - 1.2 - - 1.3 - - 1.4 - - 1.5 - - 1.6 - - tip -install: - - go install ./... - - go get github.com/BurntSushi/toml-test -script: - - export PATH="$PATH:$HOME/gopath/bin" - - make test diff --git a/vendor/github.com/BurntSushi/toml/COMPATIBLE b/vendor/github.com/BurntSushi/toml/COMPATIBLE deleted file mode 100644 index 6efcfd0ce..000000000 --- a/vendor/github.com/BurntSushi/toml/COMPATIBLE +++ /dev/null @@ -1,3 +0,0 @@ -Compatible with TOML version -[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md) - diff --git a/vendor/github.com/BurntSushi/toml/Makefile b/vendor/github.com/BurntSushi/toml/Makefile deleted file mode 100644 index 3600848d3..000000000 --- a/vendor/github.com/BurntSushi/toml/Makefile +++ /dev/null @@ -1,19 +0,0 @@ -install: - go install ./... - -test: install - go test -v - toml-test toml-test-decoder - toml-test -encoder toml-test-encoder - -fmt: - gofmt -w *.go */*.go - colcheck *.go */*.go - -tags: - find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS - -push: - git push origin master - git push github master - diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md index 7c1b37ecc..3651cfa96 100644 --- a/vendor/github.com/BurntSushi/toml/README.md +++ b/vendor/github.com/BurntSushi/toml/README.md @@ -1,46 +1,26 @@ -## TOML parser and encoder for Go with reflection - TOML stands for Tom's Obvious, Minimal Language. This Go package provides a -reflection interface similar to Go's standard library `json` and `xml` -packages. This package also supports the `encoding.TextUnmarshaler` and -`encoding.TextMarshaler` interfaces so that you can define custom data -representations. (There is an example of this below.) - -Spec: https://github.com/toml-lang/toml +reflection interface similar to Go's standard library `json` and `xml` packages. -Compatible with TOML version -[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md) +Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0). -Documentation: https://godoc.org/github.com/BurntSushi/toml +Documentation: https://godocs.io/github.com/BurntSushi/toml -Installation: +See the [releases page](https://github.com/BurntSushi/toml/releases) for a +changelog; this information is also in the git tag annotations (e.g. `git show +v0.4.0`). -```bash -go get github.com/BurntSushi/toml -``` +This library requires Go 1.13 or newer; add it to your go.mod with: -Try the toml validator: + % go get github.com/BurntSushi/toml@latest -```bash -go get github.com/BurntSushi/toml/cmd/tomlv -tomlv some-toml-file.toml -``` +It also comes with a TOML validator CLI tool: -[![Build Status](https://travis-ci.org/BurntSushi/toml.svg?branch=master)](https://travis-ci.org/BurntSushi/toml) [![GoDoc](https://godoc.org/github.com/BurntSushi/toml?status.svg)](https://godoc.org/github.com/BurntSushi/toml) - -### Testing - -This package passes all tests in -[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder -and the encoder. 
+ % go install github.com/BurntSushi/toml/cmd/tomlv@latest + % tomlv some-toml-file.toml ### Examples - -This package works similarly to how the Go standard library handles `XML` -and `JSON`. Namely, data is loaded into Go values via reflection. - -For the simplest example, consider some TOML file as just a list of keys -and values: +For the simplest example, consider some TOML file as just a list of keys and +values: ```toml Age = 25 @@ -50,29 +30,23 @@ Perfection = [ 6, 28, 496, 8128 ] DOB = 1987-07-05T05:45:00Z ``` -Which could be defined in Go as: +Which can be decoded with: ```go type Config struct { - Age int - Cats []string - Pi float64 - Perfection []int - DOB time.Time // requires `import time` + Age int + Cats []string + Pi float64 + Perfection []int + DOB time.Time } -``` -And then decoded with: - -```go var conf Config -if _, err := toml.Decode(tomlData, &conf); err != nil { - // handle error -} +_, err := toml.Decode(tomlData, &conf) ``` -You can also use struct tags if your struct field name doesn't map to a TOML -key value directly: +You can also use struct tags if your struct field name doesn't map to a TOML key +value directly: ```toml some_key_NAME = "wat" @@ -80,139 +54,67 @@ some_key_NAME = "wat" ```go type TOML struct { - ObscureKey string `toml:"some_key_NAME"` + ObscureKey string `toml:"some_key_NAME"` } ``` -### Using the `encoding.TextUnmarshaler` interface +Beware that like other decoders **only exported fields** are considered when +encoding and decoding; private fields are silently ignored. -Here's an example that automatically parses duration strings into -`time.Duration` values: +### Using the `Marshaler` and `encoding.TextUnmarshaler` interfaces +Here's an example that automatically parses values in a `mail.Address`: ```toml -[[song]] -name = "Thunder Road" -duration = "4m49s" - -[[song]] -name = "Stairway to Heaven" -duration = "8m03s" -``` - -Which can be decoded with: - -```go -type song struct { - Name string - Duration duration -} -type songs struct { - Song []song -} -var favorites songs -if _, err := toml.Decode(blob, &favorites); err != nil { - log.Fatal(err) -} - -for _, s := range favorites.Song { - fmt.Printf("%s (%s)\n", s.Name, s.Duration) -} +contacts = [ + "Donald Duck <donald@duckburg.com>", + "Scrooge McDuck <scrooge@duckburg.com>", +] ``` -And you'll also need a `duration` type that satisfies the -`encoding.TextUnmarshaler` interface: +Can be decoded with: ```go -type duration struct { - time.Duration +// Create address type which satisfies the encoding.TextUnmarshaler interface. +type address struct { + *mail.Address } -func (d *duration) UnmarshalText(text []byte) error { +func (a *address) UnmarshalText(text []byte) error { var err error - d.Duration, err = time.ParseDuration(string(text)) + a.Address, err = mail.ParseAddress(string(text)) return err } -``` - -### More complex usage - -Here's an example of how to load the example from the official spec page: - -```toml -# This is a TOML document. Boom. - -title = "TOML Example" - -[owner] -name = "Tom Preston-Werner" -organization = "GitHub" -bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." -dob = 1979-05-27T07:32:00Z # First class dates? Why not? - -[database] -server = "192.168.1.1" -ports = [ 8001, 8001, 8002 ] -connection_max = 5000 -enabled = true - -[servers] - - # You can indent as you please. Tabs or spaces. TOML don't care.
- [servers.alpha] - ip = "10.0.0.1" - dc = "eqdc10" - - [servers.beta] - ip = "10.0.0.2" - dc = "eqdc10" - -[clients] -data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it - -# Line breaks are OK when inside arrays -hosts = [ - "alpha", - "omega" -] -``` - -And the corresponding Go types are: - -```go -type tomlConfig struct { - Title string - Owner ownerInfo - DB database `toml:"database"` - Servers map[string]server - Clients clients -} -type ownerInfo struct { - Name string - Org string `toml:"organization"` - Bio string - DOB time.Time -} - -type database struct { - Server string - Ports []int - ConnMax int `toml:"connection_max"` - Enabled bool -} - -type server struct { - IP string - DC string -} - -type clients struct { - Data [][]interface{} - Hosts []string +// Decode it. +func decode() { + blob := ` + contacts = [ + "Donald Duck <donald@duckburg.com>", + "Scrooge McDuck <scrooge@duckburg.com>", + ] + ` + + var contacts struct { + Contacts []address + } + + _, err := toml.Decode(blob, &contacts) + if err != nil { + log.Fatal(err) + } + + for _, c := range contacts.Contacts { + fmt.Printf("%#v\n", c.Address) + } + + // Output: + // &mail.Address{Name:"Donald Duck", Address:"donald@duckburg.com"} + // &mail.Address{Name:"Scrooge McDuck", Address:"scrooge@duckburg.com"} } ``` -Note that a case insensitive match will be tried if an exact match can't be -found. +To target TOML specifically you can implement the `UnmarshalTOML` interface in +a similar way. -A working example of the above can be found in `_examples/example.{go,toml}`. +### More complex usage +See the [`_example/`](/_example) directory for a more complex example. diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go index b0fd51d5b..0ca1dc4fe 100644 --- a/vendor/github.com/BurntSushi/toml/decode.go +++ b/vendor/github.com/BurntSushi/toml/decode.go @@ -1,146 +1,188 @@ package toml import ( + "bytes" + "encoding" + "encoding/json" "fmt" "io" "io/ioutil" "math" + "os" "reflect" + "strconv" "strings" "time" ) -func e(format string, args ...interface{}) error { - return fmt.Errorf("toml: "+format, args...) - } - // Unmarshaler is the interface implemented by objects that can unmarshal a // TOML description of themselves. type Unmarshaler interface { UnmarshalTOML(interface{}) error } -// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`. -func Unmarshal(p []byte, v interface{}) error { - _, err := Decode(string(p), v) +// Unmarshal decodes the contents of data in TOML format into a pointer v. +// +// See [Decoder] for a description of the decoding process. +func Unmarshal(data []byte, v interface{}) error { + _, err := NewDecoder(bytes.NewReader(data)).Decode(v) return err } +// Decode the TOML data into the pointer v. +// +// See [Decoder] for a description of the decoding process. +func Decode(data string, v interface{}) (MetaData, error) { + return NewDecoder(strings.NewReader(data)).Decode(v) +} + +// DecodeFile reads the contents of a file and decodes it with [Decode]. +func DecodeFile(path string, v interface{}) (MetaData, error) { + fp, err := os.Open(path) + if err != nil { + return MetaData{}, err + } + defer fp.Close() + return NewDecoder(fp).Decode(v) +} + // Primitive is a TOML value that hasn't been decoded into a Go value. -// When using the various `Decode*` functions, the type `Primitive` may -// be given to any value, and its decoding will be delayed. // -// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
+// This type can be used for any value, which will cause decoding to be delayed. +// You can use [PrimitiveDecode] to "manually" decode these values. // -// The underlying representation of a `Primitive` value is subject to change. -// Do not rely on it. +// NOTE: The underlying representation of a `Primitive` value is subject to +// change. Do not rely on it. // -// N.B. Primitive values are still parsed, so using them will only avoid -// the overhead of reflection. They can be useful when you don't know the -// exact type of TOML data until run time. +// NOTE: Primitive values are still parsed, so using them will only avoid the +// overhead of reflection. They can be useful when you don't know the exact type +// of TOML data until runtime. type Primitive struct { undecoded interface{} context Key } -// DEPRECATED! -// -// Use MetaData.PrimitiveDecode instead. -func PrimitiveDecode(primValue Primitive, v interface{}) error { - md := MetaData{decoded: make(map[string]bool)} - return md.unify(primValue.undecoded, rvalue(v)) -} +// The significand precision for float32 and float64 is 24 and 53 bits; this is +// the range a natural number can be stored in a float without loss of data. +const ( + maxSafeFloat32Int = 16777215 // 2^24-1 + maxSafeFloat64Int = int64(9007199254740991) // 2^53-1 +) -// PrimitiveDecode is just like the other `Decode*` functions, except it -// decodes a TOML value that has already been parsed. Valid primitive values -// can *only* be obtained from values filled by the decoder functions, -// including this method. (i.e., `v` may contain more `Primitive` -// values.) +// Decoder decodes TOML data. // -// Meta data for primitive values is included in the meta data returned by -// the `Decode*` functions with one exception: keys returned by the Undecoded -// method will only reflect keys that were decoded. Namely, any keys hidden -// behind a Primitive will be considered undecoded. Executing this method will -// update the undecoded keys in the meta data. (See the example.) -func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { - md.context = primValue.context - defer func() { md.context = nil }() - return md.unify(primValue.undecoded, rvalue(v)) -} - -// Decode will decode the contents of `data` in TOML format into a pointer -// `v`. +// TOML tables correspond to Go structs or maps; they can be used +// interchangeably, but structs offer better type safety. // -// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be -// used interchangeably.) +// TOML table arrays correspond to either a slice of structs or a slice of maps. // -// TOML arrays of tables correspond to either a slice of structs or a slice -// of maps. +// TOML datetimes correspond to [time.Time]. Local datetimes are parsed in the +// local timezone. // -// TOML datetimes correspond to Go `time.Time` values. +// [time.Duration] types are treated as nanoseconds if the TOML value is an +// integer, or they're parsed with time.ParseDuration() if they're strings. // -// All other TOML types (float, string, int, bool and array) correspond -// to the obvious Go types. +// All other TOML types (float, string, int, bool and array) correspond to the +// obvious Go types. // -// An exception to the above rules is if a type implements the -// encoding.TextUnmarshaler interface. In this case, any primitive TOML value -// (floats, strings, integers, booleans and datetimes) will be converted to -// a byte string and given to the value's UnmarshalText method.
See the -// Unmarshaler example for a demonstration with time duration strings. +// An exception to the above rules is if a type implements the TextUnmarshaler +// interface, in which case any primitive TOML value (floats, strings, integers, +// booleans, datetimes) will be converted to a []byte and given to the value's +// UnmarshalText method. See the Unmarshaler example for a demonstration with +// email addresses. // -// Key mapping +// ### Key mapping // -// TOML keys can map to either keys in a Go map or field names in a Go -// struct. The special `toml` struct tag may be used to map TOML keys to -// struct fields that don't match the key name exactly. (See the example.) -// A case insensitive match to struct names will be tried if an exact match -// can't be found. +// TOML keys can map to either keys in a Go map or field names in a Go struct. +// The special `toml` struct tag can be used to map TOML keys to struct fields +// that don't match the key name exactly (see the example). A case insensitive +// match to struct names will be tried if an exact match can't be found. // -// The mapping between TOML values and Go values is loose. That is, there -// may exist TOML values that cannot be placed into your representation, and -// there may be parts of your representation that do not correspond to -// TOML values. This loose mapping can be made stricter by using the IsDefined -// and/or Undecoded methods on the MetaData returned. +// The mapping between TOML values and Go values is loose. That is, there may +// exist TOML values that cannot be placed into your representation, and there +// may be parts of your representation that do not correspond to TOML values. +// This loose mapping can be made stricter by using the IsDefined and/or +// Undecoded methods on the MetaData returned. // -// This decoder will not handle cyclic types. If a cyclic type is passed, -// `Decode` will not terminate. -func Decode(data string, v interface{}) (MetaData, error) { +// This decoder does not handle cyclic types. Decode will not terminate if a +// cyclic type is passed. +type Decoder struct { + r io.Reader +} + +// NewDecoder creates a new Decoder. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{r: r} +} + +var ( + unmarshalToml = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + unmarshalText = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + primitiveType = reflect.TypeOf((*Primitive)(nil)).Elem() +) + +// Decode TOML data into the pointer `v`. +func (dec *Decoder) Decode(v interface{}) (MetaData, error) { rv := reflect.ValueOf(v) if rv.Kind() != reflect.Ptr { - return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v)) + s := "%q" + if reflect.TypeOf(v) == nil { + s = "%v" + } + + return MetaData{}, fmt.Errorf("toml: cannot decode to non-pointer "+s, reflect.TypeOf(v)) } if rv.IsNil() { - return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v)) - } - p, err := parse(data) - if err != nil { - return MetaData{}, err + return MetaData{}, fmt.Errorf("toml: cannot decode to nil value of %q", reflect.TypeOf(v)) } - md := MetaData{ - p.mapping, p.types, p.ordered, - make(map[string]bool, len(p.ordered)), nil, + + // Check if this is a supported type: struct, map, interface{}, or something + // that implements UnmarshalTOML or UnmarshalText.
+ rv = indirect(rv) + rt := rv.Type() + if rv.Kind() != reflect.Struct && rv.Kind() != reflect.Map && + !(rv.Kind() == reflect.Interface && rv.NumMethod() == 0) && + !rt.Implements(unmarshalToml) && !rt.Implements(unmarshalText) { + return MetaData{}, fmt.Errorf("toml: cannot decode to type %s", rt) } - return md, md.unify(p.mapping, indirect(rv)) -} -// DecodeFile is just like Decode, except it will automatically read the -// contents of the file at `fpath` and decode it for you. -func DecodeFile(fpath string, v interface{}) (MetaData, error) { - bs, err := ioutil.ReadFile(fpath) + // TODO: parser should read from io.Reader? Or at the very least, make it + // read from []byte rather than string + data, err := ioutil.ReadAll(dec.r) if err != nil { return MetaData{}, err } - return Decode(string(bs), v) -} -// DecodeReader is just like Decode, except it will consume all bytes -// from the reader and decode it for you. -func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { - bs, err := ioutil.ReadAll(r) + p, err := parse(string(data)) if err != nil { return MetaData{}, err } - return Decode(string(bs), v) + + md := MetaData{ + mapping: p.mapping, + keyInfo: p.keyInfo, + keys: p.ordered, + decoded: make(map[string]struct{}, len(p.ordered)), + context: nil, + data: data, + } + return md, md.unify(p.mapping, rv) +} + +// PrimitiveDecode is just like the other Decode* functions, except it decodes a +// TOML value that has already been parsed. Valid primitive values can *only* be +// obtained from values filled by the decoder functions, including this method. +// (i.e., v may contain more [Primitive] values.) +// +// Meta data for primitive values is included in the meta data returned by the +// Decode* functions with one exception: keys returned by the Undecoded method +// will only reflect keys that were decoded. Namely, any keys hidden behind a +// Primitive will be considered undecoded. Executing this method will update the +// undecoded keys in the meta data. (See the example.) +func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { + md.context = primValue.context + defer func() { md.context = nil }() + return md.unify(primValue.undecoded, rvalue(v)) } // unify performs a sort of type unification based on the structure of `rv`, @@ -149,9 +191,9 @@ func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { // Any type mismatch produces an error. Finding a type that we don't know // how to handle produces an unsupported type error. func (md *MetaData) unify(data interface{}, rv reflect.Value) error { - // Special case. Look for a `Primitive` value. - if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() { + // TODO: #76 would make this superfluous after implemented. + if rv.Type() == primitiveType { // Save the undecoded data and the key context into the primitive // value. context := make(Key, len(md.context)) @@ -163,36 +205,24 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error { return nil } - // Special case. Unmarshaler Interface support. - if rv.CanAddr() { - if v, ok := rv.Addr().Interface().(Unmarshaler); ok { - return v.UnmarshalTOML(data) - } + rvi := rv.Interface() + if v, ok := rvi.(Unmarshaler); ok { + return v.UnmarshalTOML(data) } - - // Special case. Handle time.Time values specifically. - // TODO: Remove this code when we decide to drop support for Go 1.1. - // This isn't necessary in Go 1.2 because time.Time satisfies the encoding - // interfaces. 
- if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) { - return md.unifyDatetime(data, rv) - } - - // Special case. Look for a value satisfying the TextUnmarshaler interface. - if v, ok := rv.Interface().(TextUnmarshaler); ok { + if v, ok := rvi.(encoding.TextUnmarshaler); ok { return md.unifyText(data, v) } - // BUG(burntsushi) + + // TODO: // The behavior here is incorrect whenever a Go type satisfies the - // encoding.TextUnmarshaler interface but also corresponds to a TOML - // hash or array. In particular, the unmarshaler should only be applied - // to primitive TOML values. But at this point, it will be applied to - // all kinds of values and produce an incorrect error whenever those values - // are hashes or arrays (including arrays of tables). + // encoding.TextUnmarshaler interface but also corresponds to a TOML hash or + // array. In particular, the unmarshaler should only be applied to primitive + // TOML values. But at this point, it will be applied to all kinds of values + // and produce an incorrect error whenever those values are hashes or arrays + // (including arrays of tables). k := rv.Kind() - // laziness if k >= reflect.Int && k <= reflect.Uint64 { return md.unifyInt(data, rv) } @@ -218,17 +248,14 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error { case reflect.Bool: return md.unifyBool(data, rv) case reflect.Interface: - // we only support empty interfaces. - if rv.NumMethod() > 0 { - return e("unsupported type %s", rv.Type()) + if rv.NumMethod() > 0 { // Only empty interfaces are supported. + return md.e("unsupported type %s", rv.Type()) } return md.unifyAnything(data, rv) - case reflect.Float32: - fallthrough - case reflect.Float64: + case reflect.Float32, reflect.Float64: return md.unifyFloat64(data, rv) } - return e("unsupported type %s", rv.Kind()) + return md.e("unsupported type %s", rv.Kind()) } func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { @@ -237,7 +264,7 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { if mapping == nil { return nil } - return e("type mismatch for %s: expected table but found %T", + return md.e("type mismatch for %s: expected table but found %T", rv.Type().String(), mapping) } @@ -259,17 +286,18 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { for _, i := range f.index { subv = indirect(subv.Field(i)) } + if isUnifiable(subv) { - md.decoded[md.context.add(key).String()] = true + md.decoded[md.context.add(key).String()] = struct{}{} md.context = append(md.context, key) - if err := md.unify(datum, subv); err != nil { + + err := md.unify(datum, subv) + if err != nil { return err } md.context = md.context[0 : len(md.context)-1] } else if f.name != "" { - // Bad user! No soup for you!
- return e("cannot write unexported field %s.%s", - rv.Type().String(), f.name) + return md.e("cannot write unexported field %s.%s", rv.Type().String(), f.name) } } } @@ -277,28 +305,43 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { } func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { + keyType := rv.Type().Key().Kind() + if keyType != reflect.String && keyType != reflect.Interface { + return fmt.Errorf("toml: cannot decode to a map with non-string key type (%s in %q)", + keyType, rv.Type()) + } + tmap, ok := mapping.(map[string]interface{}) if !ok { if tmap == nil { return nil } - return badtype("map", mapping) + return md.badtype("map", mapping) } if rv.IsNil() { rv.Set(reflect.MakeMap(rv.Type())) } for k, v := range tmap { - md.decoded[md.context.add(k).String()] = true + md.decoded[md.context.add(k).String()] = struct{}{} md.context = append(md.context, k) - rvkey := indirect(reflect.New(rv.Type().Key())) rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) - if err := md.unify(v, rvval); err != nil { + + err := md.unify(v, indirect(rvval)) + if err != nil { return err } md.context = md.context[0 : len(md.context)-1] - rvkey.SetString(k) + rvkey := indirect(reflect.New(rv.Type().Key())) + + switch keyType { + case reflect.Interface: + rvkey.Set(reflect.ValueOf(k)) + case reflect.String: + rvkey.SetString(k) + } + rv.SetMapIndex(rvkey, rvval) } return nil @@ -310,12 +353,10 @@ func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { if !datav.IsValid() { return nil } - return badtype("slice", data) + return md.badtype("slice", data) } - sliceLen := datav.Len() - if sliceLen != rv.Len() { - return e("expected array length %d; got TOML array of length %d", - rv.Len(), sliceLen) + if l := datav.Len(); l != rv.Len() { + return md.e("expected array length %d; got TOML array of length %d", rv.Len(), l) } return md.unifySliceArray(datav, rv) } @@ -326,7 +367,7 @@ func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { if !datav.IsValid() { return nil } - return badtype("slice", data) + return md.badtype("slice", data) } n := datav.Len() if rv.IsNil() || rv.Cap() < n { @@ -337,37 +378,45 @@ func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { } func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { - sliceLen := data.Len() - for i := 0; i < sliceLen; i++ { - v := data.Index(i).Interface() - sliceval := indirect(rv.Index(i)) - if err := md.unify(v, sliceval); err != nil { + l := data.Len() + for i := 0; i < l; i++ { + err := md.unify(data.Index(i).Interface(), indirect(rv.Index(i))) + if err != nil { return err } } return nil } -func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error { - if _, ok := data.(time.Time); ok { - rv.Set(reflect.ValueOf(data)) +func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { + _, ok := rv.Interface().(json.Number) + if ok { + if i, ok := data.(int64); ok { + rv.SetString(strconv.FormatInt(i, 10)) + } else if f, ok := data.(float64); ok { + rv.SetString(strconv.FormatFloat(f, 'f', -1, 64)) + } else { + return md.badtype("string", data) + } return nil } - return badtype("time.Time", data) -} -func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { if s, ok := data.(string); ok { rv.SetString(s) return nil } - return badtype("string", data) + return md.badtype("string", data) } func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { + rvk := rv.Kind() 
+ if num, ok := data.(float64); ok { - switch rv.Kind() { + switch rvk { case reflect.Float32: + if num < -math.MaxFloat32 || num > math.MaxFloat32 { + return md.parseErr(errParseRange{i: num, size: rvk.String()}) + } fallthrough case reflect.Float64: rv.SetFloat(num) @@ -376,54 +425,60 @@ func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { } return nil } - return badtype("float", data) + + if num, ok := data.(int64); ok { + if (rvk == reflect.Float32 && (num < -maxSafeFloat32Int || num > maxSafeFloat32Int)) || + (rvk == reflect.Float64 && (num < -maxSafeFloat64Int || num > maxSafeFloat64Int)) { + return md.parseErr(errParseRange{i: num, size: rvk.String()}) + } + rv.SetFloat(float64(num)) + return nil + } + + return md.badtype("float", data) } func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { - if num, ok := data.(int64); ok { - if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 { - switch rv.Kind() { - case reflect.Int, reflect.Int64: - // No bounds checking necessary. - case reflect.Int8: - if num < math.MinInt8 || num > math.MaxInt8 { - return e("value %d is out of range for int8", num) - } - case reflect.Int16: - if num < math.MinInt16 || num > math.MaxInt16 { - return e("value %d is out of range for int16", num) - } - case reflect.Int32: - if num < math.MinInt32 || num > math.MaxInt32 { - return e("value %d is out of range for int32", num) - } + _, ok := rv.Interface().(time.Duration) + if ok { + // Parse as string duration, and fall back to regular integer parsing + // (as nanosecond) if this is not a string. + if s, ok := data.(string); ok { + dur, err := time.ParseDuration(s) + if err != nil { + return md.parseErr(errParseDuration{s}) } - rv.SetInt(num) - } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 { - unum := uint64(num) - switch rv.Kind() { - case reflect.Uint, reflect.Uint64: - // No bounds checking necessary. 
- case reflect.Uint8: - if num < 0 || unum > math.MaxUint8 { - return e("value %d is out of range for uint8", num) - } - case reflect.Uint16: - if num < 0 || unum > math.MaxUint16 { - return e("value %d is out of range for uint16", num) - } - case reflect.Uint32: - if num < 0 || unum > math.MaxUint32 { - return e("value %d is out of range for uint32", num) - } - } - rv.SetUint(unum) - } else { - panic("unreachable") + rv.SetInt(int64(dur)) + return nil } - return nil } - return badtype("integer", data) + + num, ok := data.(int64) + if !ok { + return md.badtype("integer", data) + } + + rvk := rv.Kind() + switch { + case rvk >= reflect.Int && rvk <= reflect.Int64: + if (rvk == reflect.Int8 && (num < math.MinInt8 || num > math.MaxInt8)) || + (rvk == reflect.Int16 && (num < math.MinInt16 || num > math.MaxInt16)) || + (rvk == reflect.Int32 && (num < math.MinInt32 || num > math.MaxInt32)) { + return md.parseErr(errParseRange{i: num, size: rvk.String()}) + } + rv.SetInt(num) + case rvk >= reflect.Uint && rvk <= reflect.Uint64: + unum := uint64(num) + if rvk == reflect.Uint8 && (num < 0 || unum > math.MaxUint8) || + rvk == reflect.Uint16 && (num < 0 || unum > math.MaxUint16) || + rvk == reflect.Uint32 && (num < 0 || unum > math.MaxUint32) { + return md.parseErr(errParseRange{i: num, size: rvk.String()}) + } + rv.SetUint(unum) + default: + panic("unreachable") + } + return nil } func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { @@ -431,7 +486,7 @@ func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { rv.SetBool(b) return nil } - return badtype("boolean", data) + return md.badtype("boolean", data) } func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error { @@ -439,10 +494,16 @@ func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error { return nil } -func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error { +func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) error { var s string switch sdata := data.(type) { - case TextMarshaler: + case Marshaler: + text, err := sdata.MarshalTOML() + if err != nil { + return err + } + s = string(text) + case encoding.TextMarshaler: text, err := sdata.MarshalText() if err != nil { return err @@ -459,7 +520,7 @@ func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error { case float64: s = fmt.Sprintf("%f", sdata) default: - return badtype("primitive (string-like)", data) + return md.badtype("primitive (string-like)", data) } if err := v.UnmarshalText([]byte(s)); err != nil { return err @@ -467,22 +528,54 @@ func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error { return nil } +func (md *MetaData) badtype(dst string, data interface{}) error { + return md.e("incompatible types: TOML value has type %T; destination has type %s", data, dst) +} + +func (md *MetaData) parseErr(err error) error { + k := md.context.String() + return ParseError{ + LastKey: k, + Position: md.keyInfo[k].pos, + Line: md.keyInfo[k].pos.Line, + err: err, + input: string(md.data), + } +} + +func (md *MetaData) e(format string, args ...interface{}) error { + f := "toml: " + if len(md.context) > 0 { + f = fmt.Sprintf("toml: (last key %q): ", md.context) + p := md.keyInfo[md.context.String()].pos + if p.Line > 0 { + f = fmt.Sprintf("toml: line %d (last key %q): ", p.Line, md.context) + } + } + return fmt.Errorf(f+format, args...) +} + // rvalue returns a reflect.Value of `v`. All pointers are resolved. 
func rvalue(v interface{}) reflect.Value { return indirect(reflect.ValueOf(v)) } // indirect returns the value pointed to by a pointer. -// Pointers are followed until the value is not a pointer. -// New values are allocated for each nil pointer. // -// An exception to this rule is if the value satisfies an interface of -// interest to us (like encoding.TextUnmarshaler). +// Pointers are followed until the value is not a pointer. New values are +// allocated for each nil pointer. +// +// An exception to this rule is if the value satisfies an interface of interest +// to us (like encoding.TextUnmarshaler). func indirect(v reflect.Value) reflect.Value { if v.Kind() != reflect.Ptr { if v.CanSet() { pv := v.Addr() - if _, ok := pv.Interface().(TextUnmarshaler); ok { + pvi := pv.Interface() + if _, ok := pvi.(encoding.TextUnmarshaler); ok { + return pv + } + if _, ok := pvi.(Unmarshaler); ok { return pv } } @@ -498,12 +591,12 @@ func isUnifiable(rv reflect.Value) bool { if rv.CanSet() { return true } - if _, ok := rv.Interface().(TextUnmarshaler); ok { + rvi := rv.Interface() + if _, ok := rvi.(encoding.TextUnmarshaler); ok { + return true + } + if _, ok := rvi.(Unmarshaler); ok { return true } return false } - -func badtype(expected string, data interface{}) error { - return e("cannot load TOML value of type %T into a Go %s", data, expected) -} diff --git a/vendor/github.com/BurntSushi/toml/decode_go116.go b/vendor/github.com/BurntSushi/toml/decode_go116.go new file mode 100644 index 000000000..086d0b686 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/decode_go116.go @@ -0,0 +1,19 @@ +//go:build go1.16 +// +build go1.16 + +package toml + +import ( + "io/fs" +) + +// DecodeFS reads the contents of a file from [fs.FS] and decodes it with +// [Decode]. +func DecodeFS(fsys fs.FS, path string, v interface{}) (MetaData, error) { + fp, err := fsys.Open(path) + if err != nil { + return MetaData{}, err + } + defer fp.Close() + return NewDecoder(fp).Decode(v) +} diff --git a/vendor/github.com/BurntSushi/toml/deprecated.go b/vendor/github.com/BurntSushi/toml/deprecated.go new file mode 100644 index 000000000..c6af3f239 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/deprecated.go @@ -0,0 +1,21 @@ +package toml + +import ( + "encoding" + "io" +) + +// Deprecated: use encoding.TextMarshaler +type TextMarshaler encoding.TextMarshaler + +// Deprecated: use encoding.TextUnmarshaler +type TextUnmarshaler encoding.TextUnmarshaler + +// Deprecated: use MetaData.PrimitiveDecode. +func PrimitiveDecode(primValue Primitive, v interface{}) error { + md := MetaData{decoded: make(map[string]struct{})} + return md.unify(primValue.undecoded, rvalue(v)) +} + +// Deprecated: use NewDecoder(reader).Decode(&value). +func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { return NewDecoder(r).Decode(v) } diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go index b371f396e..81a7c0fe9 100644 --- a/vendor/github.com/BurntSushi/toml/doc.go +++ b/vendor/github.com/BurntSushi/toml/doc.go @@ -1,27 +1,11 @@ -/* -Package toml provides facilities for decoding and encoding TOML configuration -files via reflection. There is also support for delaying decoding with -the Primitive type, and querying the set of keys in a TOML document with the -MetaData type. - -The specification implemented: https://github.com/toml-lang/toml - -The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify -whether a file is a valid TOML document. 
It can also be used to print the -type of each key in a TOML document. - -Testing - -There are two important types of tests used for this package. The first is -contained inside '*_test.go' files and uses the standard Go unit testing -framework. These tests are primarily devoted to holistically testing the -decoder and encoder. - -The second type of testing is used to verify the implementation's adherence -to the TOML specification. These tests have been factored into their own -project: https://github.com/BurntSushi/toml-test - -The reason the tests are in a separate project is so that they can be used by -any implementation of TOML. Namely, it is language agnostic. -*/ +// Package toml implements decoding and encoding of TOML files. +// +// This package supports TOML v1.0.0, as specified at https://toml.io +// +// There is also support for delaying decoding with the Primitive type, and +// querying the set of keys in a TOML document with the MetaData type. +// +// The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator, +// and can be used to verify if TOML document is valid. It can also be used to +// print the type of each key. package toml diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go index d905c21a2..930e1d521 100644 --- a/vendor/github.com/BurntSushi/toml/encode.go +++ b/vendor/github.com/BurntSushi/toml/encode.go @@ -2,57 +2,127 @@ package toml import ( "bufio" + "encoding" + "encoding/json" "errors" "fmt" "io" + "math" "reflect" "sort" "strconv" "strings" "time" + + "github.com/BurntSushi/toml/internal" ) type tomlEncodeError struct{ error } var ( - errArrayMixedElementTypes = errors.New( - "toml: cannot encode array with mixed element types") - errArrayNilElement = errors.New( - "toml: cannot encode array with nil element") - errNonString = errors.New( - "toml: cannot encode a map with non-string key type") - errAnonNonStruct = errors.New( - "toml: cannot encode an anonymous field that is not a struct") - errArrayNoTable = errors.New( - "toml: TOML array element cannot contain a table") - errNoKey = errors.New( - "toml: top-level values must be Go maps or structs") - errAnything = errors.New("") // used in testing + errArrayNilElement = errors.New("toml: cannot encode array with nil element") + errNonString = errors.New("toml: cannot encode a map with non-string key type") + errNoKey = errors.New("toml: top-level values must be Go maps or structs") + errAnything = errors.New("") // used in testing ) -var quotedReplacer = strings.NewReplacer( - "\t", "\\t", - "\n", "\\n", - "\r", "\\r", +var dblQuotedReplacer = strings.NewReplacer( "\"", "\\\"", "\\", "\\\\", + "\x00", `\u0000`, + "\x01", `\u0001`, + "\x02", `\u0002`, + "\x03", `\u0003`, + "\x04", `\u0004`, + "\x05", `\u0005`, + "\x06", `\u0006`, + "\x07", `\u0007`, + "\b", `\b`, + "\t", `\t`, + "\n", `\n`, + "\x0b", `\u000b`, + "\f", `\f`, + "\r", `\r`, + "\x0e", `\u000e`, + "\x0f", `\u000f`, + "\x10", `\u0010`, + "\x11", `\u0011`, + "\x12", `\u0012`, + "\x13", `\u0013`, + "\x14", `\u0014`, + "\x15", `\u0015`, + "\x16", `\u0016`, + "\x17", `\u0017`, + "\x18", `\u0018`, + "\x19", `\u0019`, + "\x1a", `\u001a`, + "\x1b", `\u001b`, + "\x1c", `\u001c`, + "\x1d", `\u001d`, + "\x1e", `\u001e`, + "\x1f", `\u001f`, + "\x7f", `\u007f`, ) -// Encoder controls the encoding of Go values to a TOML document to some -// io.Writer. 
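
The expanded dblQuotedReplacer above escapes every control character, so encoded strings should never produce invalid TOML output. A small illustrative sketch (not part of this patch):

    package main

    import (
    	"os"

    	"github.com/BurntSushi/toml"
    )

    func main() {
    	// Tabs and other control characters are written as escapes
    	// (\t, \u0001, ...) rather than as literal bytes.
    	v := map[string]string{"s": "col1\tcol2\x01end"}
    	if err := toml.NewEncoder(os.Stdout).Encode(v); err != nil {
    		panic(err)
    	}
    	// Expected output: s = "col1\tcol2\u0001end"
    }
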
+var (
+	marshalToml = reflect.TypeOf((*Marshaler)(nil)).Elem()
+	marshalText = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
+	timeType    = reflect.TypeOf((*time.Time)(nil)).Elem()
+)
+
+// Marshaler is the interface implemented by types that can marshal themselves
+// into valid TOML.
+type Marshaler interface {
+	MarshalTOML() ([]byte, error)
+}
+
+// Encoder encodes a Go value to a TOML document.
+//
+// The mapping between Go values and TOML values should be precisely the same as
+// for [Decode].
+//
+// time.Time is encoded as an RFC 3339 string, and time.Duration as its string
+// representation.
+//
+// The [Marshaler] and [encoding.TextMarshaler] interfaces are supported for
+// encoding the value as custom TOML.
 //
-// The indentation level can be controlled with the Indent field.
+// If you want to write arbitrary binary data then you will need to use
+// something like base64 since TOML does not have any binary types.
+//
+// When encoding TOML hashes (Go maps or structs), keys without any sub-hashes
+// are encoded first.
+//
+// Go maps will be sorted alphabetically by key for deterministic output.
+//
+// The toml struct tag can be used to provide the key name; if omitted the
+// struct field name will be used. If the "omitempty" option is present the
+// following values will be skipped:
+//
+//   - arrays, slices, maps, and string with len of 0
+//   - struct with all zero values
+//   - bool false
+//
+// If omitzero is given all int and float types with a value of 0 will be
+// skipped.
+//
+// Encoding Go values without a corresponding TOML representation will return an
+// error. Examples of this include maps with non-string keys, slices with nil
+// elements, embedded non-struct types, and nested slices containing maps or
+// structs. (e.g. [][]map[string]string is not allowed but []map[string]string
+// is okay, as is []map[string][]string).
+//
+// NOTE: only exported keys are encoded due to the use of reflection. Unexported
+// keys are silently discarded.
 type Encoder struct {
-	// A single indentation level. By default it is two spaces.
+	// String to use for a single indentation level; default is two spaces.
 	Indent string
 
-	// hasWritten is whether we have written any output to w yet.
-	hasWritten bool
 	w *bufio.Writer
+	hasWritten bool // written any output to w yet?
 }
 
-// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
-// given. By default, a single indentation level is 2 spaces.
+// NewEncoder creates a new Encoder.
 func NewEncoder(w io.Writer) *Encoder {
 	return &Encoder{
 		w: bufio.NewWriter(w),
@@ -60,29 +130,10 @@ func NewEncoder(w io.Writer) *Encoder {
 	}
 }
 
-// Encode writes a TOML representation of the Go value to the underlying
-// io.Writer. If the value given cannot be encoded to a valid TOML document,
-// then an error is returned.
-//
-// The mapping between Go values and TOML values should be precisely the same
-// as for the Decode* functions. Similarly, the TextMarshaler interface is
-// supported by encoding the resulting bytes as strings. (If you want to write
-// arbitrary binary data then you will need to use something like base64 since
-// TOML does not have any binary types.)
+// Encode writes a TOML representation of the Go value to the [Encoder]'s writer.
 //
-// When encoding TOML hashes (i.e., Go maps or structs), keys without any
-// sub-hashes are encoded first.
-//
-// If a Go map is encoded, then its keys are sorted alphabetically for
-// deterministic output. More control over this behavior may be provided if
-// there is demand for it.
-//
-// Encoding Go values without a corresponding TOML representation---like map
-// types with non-string keys---will cause an error to be returned. Similarly
-// for mixed arrays/slices, arrays/slices with nil elements, embedded
-// non-struct types and nested slices containing maps or structs.
-// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
-// and so is []map[string][]string.)
+// An error is returned if the value given cannot be encoded to a valid TOML
+// document.
 func (enc *Encoder) Encode(v interface{}) error {
 	rv := eindirect(reflect.ValueOf(v))
 	if err := enc.safeEncode(Key([]string{}), rv); err != nil {
@@ -106,13 +157,15 @@ func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
 }
 
 func (enc *Encoder) encode(key Key, rv reflect.Value) {
-	// Special case. Time needs to be in ISO8601 format.
-	// Special case. If we can marshal the type to text, then we used that.
-	// Basically, this prevents the encoder for handling these types as
-	// generic structs (or whatever the underlying type of a TextMarshaler is).
-	switch rv.Interface().(type) {
-	case time.Time, TextMarshaler:
-		enc.keyEqElement(key, rv)
+	// If we can marshal the type to text, then we use that. This prevents the
+	// encoder from handling these types as generic structs (or whatever the
+	// underlying type of a TextMarshaler is).
+	switch {
+	case isMarshaler(rv):
+		enc.writeKeyValue(key, rv, false)
+		return
+	case rv.Type() == primitiveType: // TODO: #76 would make this superfluous once implemented.
+		enc.encode(key, reflect.ValueOf(rv.Interface().(Primitive).undecoded))
 		return
 	}
 
@@ -123,12 +176,12 @@ func (enc *Encoder) encode(key Key, rv reflect.Value) {
 		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
 		reflect.Uint64,
 		reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
-		enc.keyEqElement(key, rv)
+		enc.writeKeyValue(key, rv, false)
 	case reflect.Array, reflect.Slice:
 		if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
 			enc.eArrayOfTables(key, rv)
 		} else {
-			enc.keyEqElement(key, rv)
+			enc.writeKeyValue(key, rv, false)
 		}
 	case reflect.Interface:
 		if rv.IsNil() {
@@ -148,55 +201,114 @@ func (enc *Encoder) encode(key Key, rv reflect.Value) {
 	case reflect.Struct:
 		enc.eTable(key, rv)
 	default:
-		panic(e("unsupported type for key '%s': %s", key, k))
+		encPanic(fmt.Errorf("unsupported type for key '%s': %s", key, k))
 	}
 }
 
-// eElement encodes any value that can be an array element (primitives and
-// arrays).
+// eElement encodes any value that can be an array element.
 func (enc *Encoder) eElement(rv reflect.Value) {
 	switch v := rv.Interface().(type) {
-	case time.Time:
-		// Special case time.Time as a primitive. Has to come before
-		// TextMarshaler below because time.Time implements
-		// encoding.TextMarshaler, but we need to always use UTC.
-		enc.wf(v.UTC().Format("2006-01-02T15:04:05Z"))
+	case time.Time: // Using TextMarshaler adds extra quotes, which we don't want.
+		format := time.RFC3339Nano
+		switch v.Location() {
+		case internal.LocalDatetime:
+			format = "2006-01-02T15:04:05.999999999"
+		case internal.LocalDate:
+			format = "2006-01-02"
+		case internal.LocalTime:
+			format = "15:04:05.999999999"
+		}
+		switch v.Location() {
+		default:
+			enc.wf(v.Format(format))
+		case internal.LocalDatetime, internal.LocalDate, internal.LocalTime:
+			enc.wf(v.In(time.UTC).Format(format))
+		}
 		return
-	case TextMarshaler:
-		// Special case. Use text marshaler if it's available for this value.
- if s, err := v.MarshalText(); err != nil { + case Marshaler: + s, err := v.MarshalTOML() + if err != nil { encPanic(err) - } else { - enc.writeQuoted(string(s)) } + if s == nil { + encPanic(errors.New("MarshalTOML returned nil and no error")) + } + enc.w.Write(s) return + case encoding.TextMarshaler: + s, err := v.MarshalText() + if err != nil { + encPanic(err) + } + if s == nil { + encPanic(errors.New("MarshalText returned nil and no error")) + } + enc.writeQuoted(string(s)) + return + case time.Duration: + enc.writeQuoted(v.String()) + return + case json.Number: + n, _ := rv.Interface().(json.Number) + + if n == "" { /// Useful zero value. + enc.w.WriteByte('0') + return + } else if v, err := n.Int64(); err == nil { + enc.eElement(reflect.ValueOf(v)) + return + } else if v, err := n.Float64(); err == nil { + enc.eElement(reflect.ValueOf(v)) + return + } + encPanic(fmt.Errorf("unable to convert %q to int64 or float64", n)) } + switch rv.Kind() { + case reflect.Ptr: + enc.eElement(rv.Elem()) + return + case reflect.String: + enc.writeQuoted(rv.String()) case reflect.Bool: enc.wf(strconv.FormatBool(rv.Bool())) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, - reflect.Int64: + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: enc.wf(strconv.FormatInt(rv.Int(), 10)) - case reflect.Uint, reflect.Uint8, reflect.Uint16, - reflect.Uint32, reflect.Uint64: + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: enc.wf(strconv.FormatUint(rv.Uint(), 10)) case reflect.Float32: - enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32))) + f := rv.Float() + if math.IsNaN(f) { + enc.wf("nan") + } else if math.IsInf(f, 0) { + enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)]) + } else { + enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32))) + } case reflect.Float64: - enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64))) + f := rv.Float() + if math.IsNaN(f) { + enc.wf("nan") + } else if math.IsInf(f, 0) { + enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)]) + } else { + enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64))) + } case reflect.Array, reflect.Slice: enc.eArrayOrSliceElement(rv) + case reflect.Struct: + enc.eStruct(nil, rv, true) + case reflect.Map: + enc.eMap(nil, rv, true) case reflect.Interface: enc.eElement(rv.Elem()) - case reflect.String: - enc.writeQuoted(rv.String()) default: - panic(e("unexpected primitive type: %s", rv.Kind())) + encPanic(fmt.Errorf("unexpected type: %T", rv.Interface())) } } -// By the TOML spec, all floats must have a decimal with at least one -// number on either side. +// By the TOML spec, all floats must have a decimal with at least one number on +// either side. 
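
With the eElement cases above, non-finite floats are written as TOML's nan/+inf/-inf literals, and time.Duration values are written as quoted strings. Illustrative sketch (not part of this patch):

    package main

    import (
    	"math"
    	"os"
    	"time"

    	"github.com/BurntSushi/toml"
    )

    func main() {
    	err := toml.NewEncoder(os.Stdout).Encode(map[string]interface{}{
    		"nan":  math.NaN(),
    		"inf":  math.Inf(-1),
    		"wait": 90 * time.Second,
    	})
    	if err != nil {
    		panic(err)
    	}
    	// Expected output (roughly; map keys are written in sorted order):
    	//   inf = -inf
    	//   nan = nan
    	//   wait = "1m30s"
    }
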
func floatAddDecimal(fstr string) string { if !strings.Contains(fstr, ".") { return fstr + ".0" @@ -205,14 +317,14 @@ func floatAddDecimal(fstr string) string { } func (enc *Encoder) writeQuoted(s string) { - enc.wf("\"%s\"", quotedReplacer.Replace(s)) + enc.wf("\"%s\"", dblQuotedReplacer.Replace(s)) } func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { length := rv.Len() enc.wf("[") for i := 0; i < length; i++ { - elem := rv.Index(i) + elem := eindirect(rv.Index(i)) enc.eElement(elem) if i != length-1 { enc.wf(", ") @@ -226,44 +338,43 @@ func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { encPanic(errNoKey) } for i := 0; i < rv.Len(); i++ { - trv := rv.Index(i) + trv := eindirect(rv.Index(i)) if isNil(trv) { continue } - panicIfInvalidKey(key) enc.newline() - enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll()) + enc.wf("%s[[%s]]", enc.indentStr(key), key) enc.newline() - enc.eMapOrStruct(key, trv) + enc.eMapOrStruct(key, trv, false) } } func (enc *Encoder) eTable(key Key, rv reflect.Value) { - panicIfInvalidKey(key) if len(key) == 1 { // Output an extra newline between top-level tables. // (The newline isn't written if nothing else has been written though.) enc.newline() } if len(key) > 0 { - enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll()) + enc.wf("%s[%s]", enc.indentStr(key), key) enc.newline() } - enc.eMapOrStruct(key, rv) + enc.eMapOrStruct(key, rv, false) } -func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) { - switch rv := eindirect(rv); rv.Kind() { +func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value, inline bool) { + switch rv.Kind() { case reflect.Map: - enc.eMap(key, rv) + enc.eMap(key, rv, inline) case reflect.Struct: - enc.eStruct(key, rv) + enc.eStruct(key, rv, inline) default: + // Should never happen? panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String()) } } -func (enc *Encoder) eMap(key Key, rv reflect.Value) { +func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { rt := rv.Type() if rt.Key().Kind() != reflect.String { encPanic(errNonString) @@ -274,118 +385,180 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value) { var mapKeysDirect, mapKeysSub []string for _, mapKey := range rv.MapKeys() { k := mapKey.String() - if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) { + if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) { mapKeysSub = append(mapKeysSub, k) } else { mapKeysDirect = append(mapKeysDirect, k) } } - var writeMapKeys = func(mapKeys []string) { + var writeMapKeys = func(mapKeys []string, trailC bool) { sort.Strings(mapKeys) - for _, mapKey := range mapKeys { - mrv := rv.MapIndex(reflect.ValueOf(mapKey)) - if isNil(mrv) { - // Don't write anything for nil fields. 
+ for i, mapKey := range mapKeys { + val := eindirect(rv.MapIndex(reflect.ValueOf(mapKey))) + if isNil(val) { continue } - enc.encode(key.add(mapKey), mrv) + + if inline { + enc.writeKeyValue(Key{mapKey}, val, true) + if trailC || i != len(mapKeys)-1 { + enc.wf(", ") + } + } else { + enc.encode(key.add(mapKey), val) + } } } - writeMapKeys(mapKeysDirect) - writeMapKeys(mapKeysSub) + + if inline { + enc.wf("{") + } + writeMapKeys(mapKeysDirect, len(mapKeysSub) > 0) + writeMapKeys(mapKeysSub, false) + if inline { + enc.wf("}") + } +} + +const is32Bit = (32 << (^uint(0) >> 63)) == 32 + +func pointerTo(t reflect.Type) reflect.Type { + if t.Kind() == reflect.Ptr { + return pointerTo(t.Elem()) + } + return t } -func (enc *Encoder) eStruct(key Key, rv reflect.Value) { +func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { // Write keys for fields directly under this key first, because if we write - // a field that creates a new table, then all keys under it will be in that + // a field that creates a new table then all keys under it will be in that // table (not the one we're writing here). - rt := rv.Type() - var fieldsDirect, fieldsSub [][]int - var addFields func(rt reflect.Type, rv reflect.Value, start []int) + // + // Fields is a [][]int: for fieldsDirect this always has one entry (the + // struct index). For fieldsSub it contains two entries: the parent field + // index from tv, and the field indexes for the fields of the sub. + var ( + rt = rv.Type() + fieldsDirect, fieldsSub [][]int + addFields func(rt reflect.Type, rv reflect.Value, start []int) + ) addFields = func(rt reflect.Type, rv reflect.Value, start []int) { for i := 0; i < rt.NumField(); i++ { f := rt.Field(i) - // skip unexported fields - if f.PkgPath != "" && !f.Anonymous { + isEmbed := f.Anonymous && pointerTo(f.Type).Kind() == reflect.Struct + if f.PkgPath != "" && !isEmbed { /// Skip unexported fields. continue } - frv := rv.Field(i) - if f.Anonymous { - t := f.Type - switch t.Kind() { - case reflect.Struct: - // Treat anonymous struct fields with - // tag names as though they are not - // anonymous, like encoding/json does. - if getOptions(f.Tag).name == "" { - addFields(t, frv, f.Index) - continue - } - case reflect.Ptr: - if t.Elem().Kind() == reflect.Struct && - getOptions(f.Tag).name == "" { - if !frv.IsNil() { - addFields(t.Elem(), frv.Elem(), f.Index) - } - continue - } - // Fall through to the normal field encoding logic below - // for non-struct anonymous fields. + opts := getOptions(f.Tag) + if opts.skip { + continue + } + + frv := eindirect(rv.Field(i)) + + // Treat anonymous struct fields with tag names as though they are + // not anonymous, like encoding/json does. + // + // Non-struct anonymous fields use the normal encoding logic. + if isEmbed { + if getOptions(f.Tag).name == "" && frv.Kind() == reflect.Struct { + addFields(frv.Type(), frv, append(start, f.Index...)) + continue } } - if typeIsHash(tomlTypeOfGo(frv)) { + if typeIsTable(tomlTypeOfGo(frv)) { fieldsSub = append(fieldsSub, append(start, f.Index...)) } else { - fieldsDirect = append(fieldsDirect, append(start, f.Index...)) + // Copy so it works correct on 32bit archs; not clear why this + // is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4 + // This also works fine on 64bit, but 32bit archs are somewhat + // rare and this is a wee bit faster. 
+ if is32Bit { + copyStart := make([]int, len(start)) + copy(copyStart, start) + fieldsDirect = append(fieldsDirect, append(copyStart, f.Index...)) + } else { + fieldsDirect = append(fieldsDirect, append(start, f.Index...)) + } } } } addFields(rt, rv, nil) - var writeFields = func(fields [][]int) { + writeFields := func(fields [][]int) { for _, fieldIndex := range fields { - sft := rt.FieldByIndex(fieldIndex) - sf := rv.FieldByIndex(fieldIndex) - if isNil(sf) { - // Don't write anything for nil fields. + fieldType := rt.FieldByIndex(fieldIndex) + fieldVal := eindirect(rv.FieldByIndex(fieldIndex)) + + if isNil(fieldVal) { /// Don't write anything for nil fields. continue } - opts := getOptions(sft.Tag) + opts := getOptions(fieldType.Tag) if opts.skip { continue } - keyName := sft.Name + keyName := fieldType.Name if opts.name != "" { keyName = opts.name } - if opts.omitempty && isEmpty(sf) { + + if opts.omitempty && enc.isEmpty(fieldVal) { continue } - if opts.omitzero && isZero(sf) { + if opts.omitzero && isZero(fieldVal) { continue } - enc.encode(key.add(keyName), sf) + if inline { + enc.writeKeyValue(Key{keyName}, fieldVal, true) + if fieldIndex[0] != len(fields)-1 { + enc.wf(", ") + } + } else { + enc.encode(key.add(keyName), fieldVal) + } } } + + if inline { + enc.wf("{") + } writeFields(fieldsDirect) writeFields(fieldsSub) + if inline { + enc.wf("}") + } } -// tomlTypeName returns the TOML type name of the Go value's type. It is -// used to determine whether the types of array elements are mixed (which is -// forbidden). If the Go value is nil, then it is illegal for it to be an array -// element, and valueIsNil is returned as true. - -// Returns the TOML type of a Go value. The type may be `nil`, which means -// no concrete TOML type could be found. +// tomlTypeOfGo returns the TOML type name of the Go value's type. +// +// It is used to determine whether the types of array elements are mixed (which +// is forbidden). If the Go value is nil, then it is illegal for it to be an +// array element, and valueIsNil is returned as true. +// +// The type may be `nil`, which means no concrete TOML type could be found. func tomlTypeOfGo(rv reflect.Value) tomlType { if isNil(rv) || !rv.IsValid() { return nil } + + if rv.Kind() == reflect.Struct { + if rv.Type() == timeType { + return tomlDatetime + } + if isMarshaler(rv) { + return tomlString + } + return tomlHash + } + + if isMarshaler(rv) { + return tomlString + } + switch rv.Kind() { case reflect.Bool: return tomlBool @@ -397,7 +570,7 @@ func tomlTypeOfGo(rv reflect.Value) tomlType { case reflect.Float32, reflect.Float64: return tomlFloat case reflect.Array, reflect.Slice: - if typeEqual(tomlHash, tomlArrayType(rv)) { + if isTableArray(rv) { return tomlArrayHash } return tomlArray @@ -407,54 +580,35 @@ func tomlTypeOfGo(rv reflect.Value) tomlType { return tomlString case reflect.Map: return tomlHash - case reflect.Struct: - switch rv.Interface().(type) { - case time.Time: - return tomlDatetime - case TextMarshaler: - return tomlString - default: - return tomlHash - } default: - panic("unexpected reflect.Kind: " + rv.Kind().String()) + encPanic(errors.New("unsupported type: " + rv.Kind().String())) + panic("unreachable") } } -// tomlArrayType returns the element type of a TOML array. The type returned -// may be nil if it cannot be determined (e.g., a nil slice or a zero length -// slize). 
This function may also panic if it finds a type that cannot be -// expressed in TOML (such as nil elements, heterogeneous arrays or directly -// nested arrays of tables). -func tomlArrayType(rv reflect.Value) tomlType { - if isNil(rv) || !rv.IsValid() || rv.Len() == 0 { - return nil - } - firstType := tomlTypeOfGo(rv.Index(0)) - if firstType == nil { - encPanic(errArrayNilElement) +func isMarshaler(rv reflect.Value) bool { + return rv.Type().Implements(marshalText) || rv.Type().Implements(marshalToml) +} + +// isTableArray reports if all entries in the array or slice are a table. +func isTableArray(arr reflect.Value) bool { + if isNil(arr) || !arr.IsValid() || arr.Len() == 0 { + return false } - rvlen := rv.Len() - for i := 1; i < rvlen; i++ { - elem := rv.Index(i) - switch elemType := tomlTypeOfGo(elem); { - case elemType == nil: + ret := true + for i := 0; i < arr.Len(); i++ { + tt := tomlTypeOfGo(eindirect(arr.Index(i))) + // Don't allow nil. + if tt == nil { encPanic(errArrayNilElement) - case !typeEqual(firstType, elemType): - encPanic(errArrayMixedElementTypes) } - } - // If we have a nested array, then we must make sure that the nested - // array contains ONLY primitives. - // This checks arbitrarily nested arrays. - if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) { - nest := tomlArrayType(eindirect(rv.Index(0))) - if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) { - encPanic(errArrayNoTable) + + if ret && !typeEqual(tomlHash, tt) { + ret = false } } - return firstType + return ret } type tagOptions struct { @@ -495,10 +649,26 @@ func isZero(rv reflect.Value) bool { return false } -func isEmpty(rv reflect.Value) bool { +func (enc *Encoder) isEmpty(rv reflect.Value) bool { switch rv.Kind() { case reflect.Array, reflect.Slice, reflect.Map, reflect.String: return rv.Len() == 0 + case reflect.Struct: + if rv.Type().Comparable() { + return reflect.Zero(rv.Type()).Interface() == rv.Interface() + } + // Need to also check if all the fields are empty, otherwise something + // like this with uncomparable types will always return true: + // + // type a struct{ field b } + // type b struct{ s []string } + // s := a{field: b{s: []string{"AAA"}}} + for i := 0; i < rv.NumField(); i++ { + if !enc.isEmpty(rv.Field(i)) { + return false + } + } + return true case reflect.Bool: return !rv.Bool() } @@ -511,18 +681,31 @@ func (enc *Encoder) newline() { } } -func (enc *Encoder) keyEqElement(key Key, val reflect.Value) { +// Write a key/value pair: +// +// key = +// +// This is also used for "k = v" in inline tables; so something like this will +// be written in three calls: +// +// ┌───────────────────┐ +// │ ┌───┐ ┌────┐│ +// v v v v vv +// key = {k = 1, k2 = 2} +func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) { if len(key) == 0 { encPanic(errNoKey) } - panicIfInvalidKey(key) enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) enc.eElement(val) - enc.newline() + if !inline { + enc.newline() + } } func (enc *Encoder) wf(format string, v ...interface{}) { - if _, err := fmt.Fprintf(enc.w, format, v...); err != nil { + _, err := fmt.Fprintf(enc.w, format, v...) + if err != nil { encPanic(err) } enc.hasWritten = true @@ -536,13 +719,25 @@ func encPanic(err error) { panic(tomlEncodeError{err}) } +// Resolve any level of pointers to the actual value (e.g. **string → string). 
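
The tag handling above (getOptions, isZero, and the new Encoder.isEmpty, which recurses into non-comparable structs) drives the omitempty and omitzero options documented on Encoder. A sketch of the visible behaviour (illustrative only; the settings type is invented):

    package main

    import (
    	"os"

    	"github.com/BurntSushi/toml"
    )

    type settings struct {
    	Name  string   `toml:"name,omitempty"` // skipped when ""
    	Tags  []string `toml:"tags,omitempty"` // skipped when nil or empty
    	Port  int      `toml:"port,omitzero"`  // skipped when 0
    	Debug bool     `toml:"debug"`          // always written
    }

    func main() {
    	// Only "debug = false" should be written; the zero values are dropped.
    	if err := toml.NewEncoder(os.Stdout).Encode(settings{}); err != nil {
    		panic(err)
    	}
    }
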
func eindirect(v reflect.Value) reflect.Value { - switch v.Kind() { - case reflect.Ptr, reflect.Interface: - return eindirect(v.Elem()) - default: + if v.Kind() != reflect.Ptr && v.Kind() != reflect.Interface { + if isMarshaler(v) { + return v + } + if v.CanAddr() { /// Special case for marshalers; see #358. + if pv := v.Addr(); isMarshaler(pv) { + return pv + } + } + return v + } + + if v.IsNil() { return v } + + return eindirect(v.Elem()) } func isNil(rv reflect.Value) bool { @@ -553,16 +748,3 @@ func isNil(rv reflect.Value) bool { return false } } - -func panicIfInvalidKey(key Key) { - for _, k := range key { - if len(k) == 0 { - encPanic(e("Key '%s' is not a valid table name. Key names "+ - "cannot be empty.", key.maybeQuotedAll())) - } - } -} - -func isValidKeyName(s string) bool { - return len(s) != 0 -} diff --git a/vendor/github.com/BurntSushi/toml/encoding_types.go b/vendor/github.com/BurntSushi/toml/encoding_types.go deleted file mode 100644 index d36e1dd60..000000000 --- a/vendor/github.com/BurntSushi/toml/encoding_types.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build go1.2 - -package toml - -// In order to support Go 1.1, we define our own TextMarshaler and -// TextUnmarshaler types. For Go 1.2+, we just alias them with the -// standard library interfaces. - -import ( - "encoding" -) - -// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here -// so that Go 1.1 can be supported. -type TextMarshaler encoding.TextMarshaler - -// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined -// here so that Go 1.1 can be supported. -type TextUnmarshaler encoding.TextUnmarshaler diff --git a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go deleted file mode 100644 index e8d503d04..000000000 --- a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build !go1.2 - -package toml - -// These interfaces were introduced in Go 1.2, so we add them manually when -// compiling for Go 1.1. - -// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here -// so that Go 1.1 can be supported. -type TextMarshaler interface { - MarshalText() (text []byte, err error) -} - -// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined -// here so that Go 1.1 can be supported. -type TextUnmarshaler interface { - UnmarshalText(text []byte) error -} diff --git a/vendor/github.com/BurntSushi/toml/error.go b/vendor/github.com/BurntSushi/toml/error.go new file mode 100644 index 000000000..f4f390e64 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/error.go @@ -0,0 +1,279 @@ +package toml + +import ( + "fmt" + "strings" +) + +// ParseError is returned when there is an error parsing the TOML syntax such as +// invalid syntax, duplicate keys, etc. +// +// In addition to the error message itself, you can also print detailed location +// information with context by using [ErrorWithPosition]: +// +// toml: error: Key 'fruit' was already created and cannot be used as an array. 
+//
+//	At line 4, column 2-7:
+//
+//	      2 | fruit = []
+//	      3 |
+//	      4 | [[fruit]] # Not allowed
+//	          ^^^^^
+//
+// [ErrorWithUsage] can be used to print the above with some more detailed usage
+// guidance:
+//
+//	toml: error: newlines not allowed within inline tables
+//
+//	At line 1, column 18:
+//
+//	      1 | x = [{ key = 42 #
+//	                           ^
+//
+//	Error help:
+//
+//	  Inline tables must always be on a single line:
+//
+//	      table = {key = 42, second = 43}
+//
+//	  It is invalid to split them over multiple lines like so:
+//
+//	      # INVALID
+//	      table = {
+//	          key    = 42,
+//	          second = 43
+//	      }
+//
+//	  Use regular for this:
+//
+//	      [table]
+//	      key    = 42
+//	      second = 43
+type ParseError struct {
+	Message  string   // Short technical message.
+	Usage    string   // Longer message with usage guidance; may be blank.
+	Position Position // Position of the error.
+	LastKey  string   // Last parsed key, may be blank.
+
+	// Line the error occurred on.
+	//
+	// Deprecated: use [Position].
+	Line int
+
+	err   error
+	input string
+}
+
+// Position of an error.
+type Position struct {
+	Line  int // Line number, starting at 1.
+	Start int // Start of error, as byte offset starting at 0.
+	Len   int // Length in bytes.
+}
+
+func (pe ParseError) Error() string {
+	msg := pe.Message
+	if msg == "" { // Error from errorf()
+		msg = pe.err.Error()
+	}
+
+	if pe.LastKey == "" {
+		return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, msg)
+	}
+	return fmt.Sprintf("toml: line %d (last key %q): %s",
+		pe.Position.Line, pe.LastKey, msg)
+}
+
+// ErrorWithPosition returns the error with detailed location context.
+//
+// See the documentation on [ParseError].
+func (pe ParseError) ErrorWithPosition() string {
+	if pe.input == "" { // Should never happen, but just in case.
+		return pe.Error()
+	}
+
+	var (
+		lines = strings.Split(pe.input, "\n")
+		col   = pe.column(lines)
+		b     = new(strings.Builder)
+	)
+
+	msg := pe.Message
+	if msg == "" {
+		msg = pe.err.Error()
+	}
+
+	// TODO: don't show control characters as literals? This may not show up
+	// well everywhere.
+
+	if pe.Position.Len == 1 {
+		fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n",
+			msg, pe.Position.Line, col+1)
+	} else {
+		fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n",
+			msg, pe.Position.Line, col, col+pe.Position.Len)
+	}
+	if pe.Position.Line > 2 {
+		fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, lines[pe.Position.Line-3])
+	}
+	if pe.Position.Line > 1 {
+		fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, lines[pe.Position.Line-2])
+	}
+	fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, lines[pe.Position.Line-1])
+	fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col), strings.Repeat("^", pe.Position.Len))
+	return b.String()
+}
+
+// ErrorWithUsage returns the error with detailed location context and usage
+// guidance.
+//
+// See the documentation on [ParseError].
+func (pe ParseError) ErrorWithUsage() string {
+	m := pe.ErrorWithPosition()
+	if u, ok := pe.err.(interface{ Usage() string }); ok && u.Usage() != "" {
+		lines := strings.Split(strings.TrimSpace(u.Usage()), "\n")
+		for i := range lines {
+			if lines[i] != "" {
+				lines[i] = "  " + lines[i]
+			}
+		}
+		return m + "Error help:\n\n" + strings.Join(lines, "\n") + "\n"
+	}
+	return m
+}
+
+func (pe ParseError) column(lines []string) int {
+	var pos, col int
+	for i := range lines {
+		ll := len(lines[i]) + 1 // +1 for the removed newline
+		if pos+ll >= pe.Position.Start {
+			col = pe.Position.Start - pos
+			if col < 0 { // Should never happen, but just in case.
+ col = 0 + } + break + } + pos += ll + } + + return col +} + +type ( + errLexControl struct{ r rune } + errLexEscape struct{ r rune } + errLexUTF8 struct{ b byte } + errLexInvalidNum struct{ v string } + errLexInvalidDate struct{ v string } + errLexInlineTableNL struct{} + errLexStringNL struct{} + errParseRange struct { + i interface{} // int or float + size string // "int64", "uint16", etc. + } + errParseDuration struct{ d string } +) + +func (e errLexControl) Error() string { + return fmt.Sprintf("TOML files cannot contain control characters: '0x%02x'", e.r) +} +func (e errLexControl) Usage() string { return "" } + +func (e errLexEscape) Error() string { return fmt.Sprintf(`invalid escape in string '\%c'`, e.r) } +func (e errLexEscape) Usage() string { return usageEscape } +func (e errLexUTF8) Error() string { return fmt.Sprintf("invalid UTF-8 byte: 0x%02x", e.b) } +func (e errLexUTF8) Usage() string { return "" } +func (e errLexInvalidNum) Error() string { return fmt.Sprintf("invalid number: %q", e.v) } +func (e errLexInvalidNum) Usage() string { return "" } +func (e errLexInvalidDate) Error() string { return fmt.Sprintf("invalid date: %q", e.v) } +func (e errLexInvalidDate) Usage() string { return "" } +func (e errLexInlineTableNL) Error() string { return "newlines not allowed within inline tables" } +func (e errLexInlineTableNL) Usage() string { return usageInlineNewline } +func (e errLexStringNL) Error() string { return "strings cannot contain newlines" } +func (e errLexStringNL) Usage() string { return usageStringNewline } +func (e errParseRange) Error() string { return fmt.Sprintf("%v is out of range for %s", e.i, e.size) } +func (e errParseRange) Usage() string { return usageIntOverflow } +func (e errParseDuration) Error() string { return fmt.Sprintf("invalid duration: %q", e.d) } +func (e errParseDuration) Usage() string { return usageDuration } + +const usageEscape = ` +A '\' inside a "-delimited string is interpreted as an escape character. + +The following escape sequences are supported: +\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX + +To prevent a '\' from being recognized as an escape character, use either: + +- a ' or '''-delimited string; escape characters aren't processed in them; or +- write two backslashes to get a single backslash: '\\'. + +If you're trying to add a Windows path (e.g. "C:\Users\martin") then using '/' +instead of '\' will usually also work: "C:/Users/martin". +` + +const usageInlineNewline = ` +Inline tables must always be on a single line: + + table = {key = 42, second = 43} + +It is invalid to split them over multiple lines like so: + + # INVALID + table = { + key = 42, + second = 43 + } + +Use regular for this: + + [table] + key = 42 + second = 43 +` + +const usageStringNewline = ` +Strings must always be on a single line, and cannot span more than one line: + + # INVALID + string = "Hello, + world!" + +Instead use """ or ''' to split strings over multiple lines: + + string = """Hello, + world!""" +` + +const usageIntOverflow = ` +This number is too large; this may be an error in the TOML, but it can also be a +bug in the program that uses too small of an integer. 
+
+The maximum and minimum values are:
+
+    size   │ lowest         │ highest
+    ───────┼────────────────┼──────────
+    int8   │ -128           │ 127
+    int16  │ -32,768        │ 32,767
+    int32  │ -2,147,483,648 │ 2,147,483,647
+    int64  │ -9.2 × 10¹⁷    │ 9.2 × 10¹⁷
+    uint8  │ 0              │ 255
+    uint16 │ 0              │ 65,535
+    uint32 │ 0              │ 4,294,967,295
+    uint64 │ 0              │ 1.8 × 10¹⁸
+
+int refers to int32 on 32-bit systems and int64 on 64-bit systems.
+`
+
+const usageDuration = `
+A duration must be given as "number" immediately followed by a unit, without
+any spaces. Valid units are:
+
+    ns         nanoseconds (billionth of a second)
+    us, µs     microseconds (millionth of a second)
+    ms         milliseconds (thousandth of a second)
+    s          seconds
+    m          minutes
+    h          hours
+
+You can combine multiple units; for example "5m10s" for 5 minutes and 10
+seconds.
+`
diff --git a/vendor/github.com/BurntSushi/toml/go.mod b/vendor/github.com/BurntSushi/toml/go.mod
new file mode 100644
index 000000000..82989481d
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/go.mod
@@ -0,0 +1,3 @@
+module github.com/BurntSushi/toml
+
+go 1.16
diff --git a/vendor/github.com/BurntSushi/toml/internal/tz.go b/vendor/github.com/BurntSushi/toml/internal/tz.go
new file mode 100644
index 000000000..022f15bc2
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/internal/tz.go
@@ -0,0 +1,36 @@
+package internal
+
+import "time"
+
+// Timezones used for local datetime, date, and time TOML types.
+//
+// The exact way times and dates without a timezone should be interpreted is not
+// well-defined in the TOML specification and is left to the implementation.
+// These default to the current local timezone offset of the computer, but this
+// can be changed by setting these variables before decoding.
+//
+// TODO:
+// Ideally we'd like to offer people the ability to configure the used timezone
+// by setting Decoder.Timezone and Encoder.Timezone; however, this is a bit
+// tricky: the reason we use three different variables for this is to support
+// round-tripping – without these specific TZ names we wouldn't know which
+// format to use.
+//
+// There isn't a good way to encode this right now though, and passing this sort
+// of information also ties in to various related issues such as string format
+// encoding, encoding of comments, etc.
+//
+// So, for the time being, just put this in internal until we can write a good
+// comprehensive API for doing all of this.
+//
+// The reason they're exported is that they're referred to from e.g.
+// internal/tag.
+//
+// Note that this behaviour is valid according to the TOML spec, as the exact
+// behaviour is left up to implementations.
+var (
+	localOffset   = func() int { _, o := time.Now().Zone(); return o }()
+	LocalDatetime = time.FixedZone("datetime-local", localOffset)
+	LocalDate     = time.FixedZone("date-local", localOffset)
+	LocalTime     = time.FixedZone("time-local", localOffset)
+)
diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go
index e0a742a88..d4d70871d 100644
--- a/vendor/github.com/BurntSushi/toml/lex.go
+++ b/vendor/github.com/BurntSushi/toml/lex.go
@@ -2,6 +2,8 @@ package toml
 
 import (
 	"fmt"
+	"reflect"
+	"runtime"
 	"strings"
 	"unicode"
 	"unicode/utf8"
@@ -29,33 +31,20 @@ const (
 	itemArrayTableStart
 	itemArrayTableEnd
 	itemKeyStart
+	itemKeyEnd
 	itemCommentStart
 	itemInlineTableStart
 	itemInlineTableEnd
 )
 
-const (
-	eof              = 0
-	comma            = ','
-	tableStart       = '['
-	tableEnd         = ']'
-	arrayTableStart  = '['
-	arrayTableEnd    = ']'
-	tableSep         = '.'
- keySep = '=' - arrayStart = '[' - arrayEnd = ']' - commentStart = '#' - stringStart = '"' - stringEnd = '"' - rawStringStart = '\'' - rawStringEnd = '\'' - inlineTableStart = '{' - inlineTableEnd = '}' -) +const eof = 0 type stateFn func(lx *lexer) stateFn +func (p Position) String() string { + return fmt.Sprintf("at line %d; start %d; length %d", p.Line, p.Start, p.Len) +} + type lexer struct { input string start int @@ -64,26 +53,26 @@ type lexer struct { state stateFn items chan item - // Allow for backing up up to three runes. - // This is necessary because TOML contains 3-rune tokens (""" and '''). - prevWidths [3]int - nprev int // how many of prevWidths are in use - // If we emit an eof, we can still back up, but it is not OK to call - // next again. - atEOF bool + // Allow for backing up up to 4 runes. This is necessary because TOML + // contains 3-rune tokens (""" and '''). + prevWidths [4]int + nprev int // how many of prevWidths are in use + atEOF bool // If we emit an eof, we can still back up, but it is not OK to call next again. // A stack of state functions used to maintain context. - // The idea is to reuse parts of the state machine in various places. - // For example, values can appear at the top level or within arbitrarily - // nested arrays. The last state on the stack is used after a value has - // been lexed. Similarly for comments. + // + // The idea is to reuse parts of the state machine in various places. For + // example, values can appear at the top level or within arbitrarily nested + // arrays. The last state on the stack is used after a value has been lexed. + // Similarly for comments. stack []stateFn } type item struct { - typ itemType - val string - line int + typ itemType + val string + err error + pos Position } func (lx *lexer) nextItem() item { @@ -93,6 +82,7 @@ func (lx *lexer) nextItem() item { return item default: lx.state = lx.state(lx) + //fmt.Printf(" STATE %-24s current: %-10s stack: %s\n", lx.state, lx.current(), lx.stack) } } } @@ -101,9 +91,9 @@ func lex(input string) *lexer { lx := &lexer{ input: input, state: lexTop, - line: 1, items: make(chan item, 10), stack: make([]stateFn, 0, 10), + line: 1, } return lx } @@ -125,19 +115,36 @@ func (lx *lexer) current() string { return lx.input[lx.start:lx.pos] } +func (lx lexer) getPos() Position { + p := Position{ + Line: lx.line, + Start: lx.start, + Len: lx.pos - lx.start, + } + if p.Len <= 0 { + p.Len = 1 + } + return p +} + func (lx *lexer) emit(typ itemType) { - lx.items <- item{typ, lx.current(), lx.line} + // Needed for multiline strings ending with an incomplete UTF-8 sequence. 
+	if lx.start > lx.pos {
+		lx.error(errLexUTF8{lx.input[lx.pos]})
+		return
+	}
+	lx.items <- item{typ: typ, pos: lx.getPos(), val: lx.current()}
 	lx.start = lx.pos
 }
 
 func (lx *lexer) emitTrim(typ itemType) {
-	lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
+	lx.items <- item{typ: typ, pos: lx.getPos(), val: strings.TrimSpace(lx.current())}
 	lx.start = lx.pos
 }
 
 func (lx *lexer) next() (r rune) {
 	if lx.atEOF {
-		panic("next called after EOF")
+		panic("BUG in lexer: next called after EOF")
 	}
 	if lx.pos >= len(lx.input) {
 		lx.atEOF = true
@@ -147,12 +154,25 @@ func (lx *lexer) next() (r rune) {
 	if lx.input[lx.pos] == '\n' {
 		lx.line++
 	}
+	lx.prevWidths[3] = lx.prevWidths[2]
 	lx.prevWidths[2] = lx.prevWidths[1]
 	lx.prevWidths[1] = lx.prevWidths[0]
-	if lx.nprev < 3 {
+	if lx.nprev < 4 {
 		lx.nprev++
 	}
+
 	r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
+	if r == utf8.RuneError {
+		lx.error(errLexUTF8{lx.input[lx.pos]})
+		return utf8.RuneError
+	}
+
+	// Note: don't use peek() here, as this calls next().
+	if isControl(r) || (r == '\r' && (len(lx.input)-1 == lx.pos || lx.input[lx.pos+1] != '\n')) {
+		lx.errorControlChar(r)
+		return utf8.RuneError
+	}
+
 	lx.prevWidths[0] = w
 	lx.pos += w
 	return r
@@ -163,19 +183,21 @@ func (lx *lexer) ignore() {
 	lx.start = lx.pos
 }
 
-// backup steps back one rune. Can be called only twice between calls to next.
+// backup steps back one rune. Can be called up to 4 times between calls to next.
 func (lx *lexer) backup() {
 	if lx.atEOF {
 		lx.atEOF = false
 		return
 	}
 	if lx.nprev < 1 {
-		panic("backed up too far")
+		panic("BUG in lexer: backed up too far")
 	}
 	w := lx.prevWidths[0]
 	lx.prevWidths[0] = lx.prevWidths[1]
 	lx.prevWidths[1] = lx.prevWidths[2]
+	lx.prevWidths[2] = lx.prevWidths[3]
 	lx.nprev--
+
 	lx.pos -= w
 	if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
 		lx.line--
@@ -211,18 +233,58 @@ func (lx *lexer) skip(pred func(rune) bool) {
 	}
 }
 
-// errorf stops all lexing by emitting an error and returning `nil`.
+// error stops all lexing by emitting an error and returning `nil`.
+//
 // Note that any value that is a character is escaped if it's a special
 // character (newlines, tabs, etc.).
+func (lx *lexer) error(err error) stateFn {
+	if lx.atEOF {
+		return lx.errorPrevLine(err)
+	}
+	lx.items <- item{typ: itemError, pos: lx.getPos(), err: err}
+	return nil
+}
+
+// errorPrevLine is like error(), but sets the position to the last column of
+// the previous line.
+//
+// This is so that unexpected EOF or NL errors don't show on a new blank line.
+func (lx *lexer) errorPrevLine(err error) stateFn {
+	pos := lx.getPos()
+	pos.Line--
+	pos.Len = 1
+	pos.Start = lx.pos - 1
+	lx.items <- item{typ: itemError, pos: pos, err: err}
+	return nil
+}
+
+// errorPos is like error(), but allows explicitly setting the position.
+func (lx *lexer) errorPos(start, length int, err error) stateFn {
+	pos := lx.getPos()
+	pos.Start = start
+	pos.Len = length
+	lx.items <- item{typ: itemError, pos: pos, err: err}
+	return nil
+}
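
Since every lexed item now carries a Position, parse failures can be reported with line and column context through ParseError. Illustrative sketch of the caller side (not part of this patch):

    package main

    import (
    	"errors"
    	"fmt"

    	"github.com/BurntSushi/toml"
    )

    func main() {
    	var v map[string]interface{}
    	_, err := toml.Decode("x = [{ key = 42\n", &v)

    	var pErr toml.ParseError
    	if errors.As(err, &pErr) {
    		// Multi-line report quoting the offending line with a ^ marker,
    		// as shown in the ParseError documentation above.
    		fmt.Println(pErr.ErrorWithPosition())
    	}
    }

+// errorf is like error, but creates a new error from a format string.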
func (lx *lexer) errorf(format string, values ...interface{}) stateFn { - lx.items <- item{ - itemError, - fmt.Sprintf(format, values...), - lx.line, + if lx.atEOF { + pos := lx.getPos() + pos.Line-- + pos.Len = 1 + pos.Start = lx.pos - 1 + lx.items <- item{typ: itemError, pos: pos, err: fmt.Errorf(format, values...)} + return nil } + lx.items <- item{typ: itemError, pos: lx.getPos(), err: fmt.Errorf(format, values...)} return nil } +func (lx *lexer) errorControlChar(cc rune) stateFn { + return lx.errorPos(lx.pos-1, 1, errLexControl{cc}) +} + // lexTop consumes elements at the top level of TOML data. func lexTop(lx *lexer) stateFn { r := lx.next() @@ -230,10 +292,10 @@ func lexTop(lx *lexer) stateFn { return lexSkip(lx, lexTop) } switch r { - case commentStart: + case '#': lx.push(lexTop) return lexCommentStart - case tableStart: + case '[': return lexTableStart case eof: if lx.pos > lx.start { @@ -256,7 +318,7 @@ func lexTop(lx *lexer) stateFn { func lexTopEnd(lx *lexer) stateFn { r := lx.next() switch { - case r == commentStart: + case r == '#': // a comment will read to a newline for us. lx.push(lexTop) return lexCommentStart @@ -269,8 +331,9 @@ func lexTopEnd(lx *lexer) stateFn { lx.emit(itemEOF) return nil } - return lx.errorf("expected a top-level item to end with a newline, "+ - "comment, or EOF, but got %q instead", r) + return lx.errorf( + "expected a top-level item to end with a newline, comment, or EOF, but got %q instead", + r) } // lexTable lexes the beginning of a table. Namely, it makes sure that @@ -279,7 +342,7 @@ func lexTopEnd(lx *lexer) stateFn { // It also handles the case that this is an item in an array of tables. // e.g., '[[name]]'. func lexTableStart(lx *lexer) stateFn { - if lx.peek() == arrayTableStart { + if lx.peek() == '[' { lx.next() lx.emit(itemArrayTableStart) lx.push(lexArrayTableEnd) @@ -296,9 +359,8 @@ func lexTableEnd(lx *lexer) stateFn { } func lexArrayTableEnd(lx *lexer) stateFn { - if r := lx.next(); r != arrayTableEnd { - return lx.errorf("expected end of table array name delimiter %q, "+ - "but got %q instead", arrayTableEnd, r) + if r := lx.next(); r != ']' { + return lx.errorf("expected end of table array name delimiter ']', but got %q instead", r) } lx.emit(itemArrayTableEnd) return lexTopEnd @@ -307,31 +369,18 @@ func lexArrayTableEnd(lx *lexer) stateFn { func lexTableNameStart(lx *lexer) stateFn { lx.skip(isWhitespace) switch r := lx.peek(); { - case r == tableEnd || r == eof: - return lx.errorf("unexpected end of table name " + - "(table names cannot be empty)") - case r == tableSep: - return lx.errorf("unexpected table separator " + - "(table names cannot be empty)") - case r == stringStart || r == rawStringStart: + case r == ']' || r == eof: + return lx.errorf("unexpected end of table name (table names cannot be empty)") + case r == '.': + return lx.errorf("unexpected table separator (table names cannot be empty)") + case r == '"' || r == '\'': lx.ignore() lx.push(lexTableNameEnd) - return lexValue // reuse string lexing + return lexQuotedName default: - return lexBareTableName - } -} - -// lexBareTableName lexes the name of a table. It assumes that at least one -// valid character for the table has already been read. 
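
Table-name lexing above now routes quoted parts through lexQuotedName, so a quoted part may contain characters, such as dots, that are invalid in bare names. Illustrative sketch (not part of this patch):

    package main

    import (
    	"fmt"

    	"github.com/BurntSushi/toml"
    )

    func main() {
    	doc := "[dog.\"tater.man\"]\ntype = \"pug\"\n"

    	var v map[string]map[string]map[string]string
    	if _, err := toml.Decode(doc, &v); err != nil {
    		panic(err)
    	}
    	// The quoted part "tater.man" is a single key, dot included.
    	fmt.Println(v["dog"]["tater.man"]["type"]) // pug
    }
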
-func lexBareTableName(lx *lexer) stateFn {
-	r := lx.next()
-	if isBareKeyChar(r) {
-		return lexBareTableName
+		lx.push(lexTableNameEnd)
+		return lexBareName
 	}
-	lx.backup()
-	lx.emit(itemText)
-	return lexTableNameEnd
 }
 
 // lexTableNameEnd reads the end of a piece of a table name, optionally
@@ -341,69 +390,107 @@ func lexTableNameEnd(lx *lexer) stateFn {
 	switch r := lx.next(); {
 	case isWhitespace(r):
 		return lexTableNameEnd
-	case r == tableSep:
+	case r == '.':
 		lx.ignore()
 		return lexTableNameStart
-	case r == tableEnd:
+	case r == ']':
 		return lx.pop()
 	default:
-		return lx.errorf("expected '.' or ']' to end table name, "+
-			"but got %q instead", r)
+		return lx.errorf("expected '.' or ']' to end table name, but got %q instead", r)
 	}
 }
 
-// lexKeyStart consumes a key name up until the first non-whitespace character.
-// lexKeyStart will ignore whitespace.
-func lexKeyStart(lx *lexer) stateFn {
-	r := lx.peek()
+
+// lexBareName lexes one part of a key or table.
+//
+// It assumes that at least one valid character for the table has already been
+// read.
+//
+// Lexes only one part, e.g. only 'a' inside 'a.b'.
+func lexBareName(lx *lexer) stateFn {
+	r := lx.next()
+	if isBareKeyChar(r) {
+		return lexBareName
+	}
+	lx.backup()
+	lx.emit(itemText)
+	return lx.pop()
+}
+
+// lexQuotedName lexes one quoted part of a key or table.
+//
+// It assumes that at least one valid character for the table has already been
+// read.
+//
+// Lexes only one part, e.g. only '"a"' inside '"a".b'.
+func lexQuotedName(lx *lexer) stateFn {
+	r := lx.next()
 	switch {
-	case r == keySep:
-		return lx.errorf("unexpected key separator %q", keySep)
-	case isWhitespace(r) || isNL(r):
-		lx.next()
-		return lexSkip(lx, lexKeyStart)
-	case r == stringStart || r == rawStringStart:
-		lx.ignore()
-		lx.emit(itemKeyStart)
-		lx.push(lexKeyEnd)
-		return lexValue // reuse string lexing
+	case isWhitespace(r):
+		return lexSkip(lx, lexValue)
+	case r == '"':
+		lx.ignore() // ignore the '"'
+		return lexString
+	case r == '\'':
+		lx.ignore() // ignore the "'"
+		return lexRawString
+	case r == eof:
+		return lx.errorf("unexpected EOF; expected value")
 	default:
+		return lx.errorf("expected value but found %q instead", r)
+	}
+}
+
+// lexKeyStart consumes all key parts until a '='.
+func lexKeyStart(lx *lexer) stateFn {
+	lx.skip(isWhitespace)
+	switch r := lx.peek(); {
+	case r == '=' || r == eof:
+		return lx.errorf("unexpected '=': key name appears blank")
+	case r == '.':
+		return lx.errorf("unexpected '.': keys cannot start with a '.'")
+	case r == '"' || r == '\'':
 		lx.ignore()
+		fallthrough
+	default: // Bare key
 		lx.emit(itemKeyStart)
-		return lexBareKey
+		return lexKeyNameStart
 	}
 }
 
-// lexBareKey consumes the text of a bare key. Assumes that the first character
-// (which is not whitespace) has not yet been consumed.
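
The new lexKeyStart/lexKeyNameStart states implement TOML 1.0 dotted keys, which the removed lexBareKey-based code rejected. Illustrative sketch (not part of this patch):

    package main

    import (
    	"fmt"

    	"github.com/BurntSushi/toml"
    )

    func main() {
    	doc := "fruit.name = \"banana\"\nfruit.rating = 8\n"

    	var v map[string]interface{}
    	if _, err := toml.Decode(doc, &v); err != nil {
    		panic(err)
    	}
    	// Dotted keys create intermediate tables, here v["fruit"].
    	fruit := v["fruit"].(map[string]interface{})
    	fmt.Println(fruit["name"], fruit["rating"]) // banana 8
    }
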
-func lexBareKey(lx *lexer) stateFn { - switch r := lx.next(); { - case isBareKeyChar(r): - return lexBareKey - case isWhitespace(r): - lx.backup() - lx.emit(itemText) - return lexKeyEnd - case r == keySep: - lx.backup() - lx.emit(itemText) - return lexKeyEnd +func lexKeyNameStart(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.peek(); { + case r == '=' || r == eof: + return lx.errorf("unexpected '='") + case r == '.': + return lx.errorf("unexpected '.'") + case r == '"' || r == '\'': + lx.ignore() + lx.push(lexKeyEnd) + return lexQuotedName default: - return lx.errorf("bare keys cannot contain %q", r) + lx.push(lexKeyEnd) + return lexBareName } } // lexKeyEnd consumes the end of a key and trims whitespace (up to the key // separator). func lexKeyEnd(lx *lexer) stateFn { + lx.skip(isWhitespace) switch r := lx.next(); { - case r == keySep: - return lexSkip(lx, lexValue) case isWhitespace(r): return lexSkip(lx, lexKeyEnd) + case r == eof: + return lx.errorf("unexpected EOF; expected key separator '='") + case r == '.': + lx.ignore() + return lexKeyNameStart + case r == '=': + lx.emit(itemKeyEnd) + return lexSkip(lx, lexValue) default: - return lx.errorf("expected key separator %q, but got %q instead", - keySep, r) + return lx.errorf("expected '.' or '=', but got %q instead", r) } } @@ -422,17 +509,17 @@ func lexValue(lx *lexer) stateFn { return lexNumberOrDateStart } switch r { - case arrayStart: + case '[': lx.ignore() lx.emit(itemArray) return lexArrayValue - case inlineTableStart: + case '{': lx.ignore() lx.emit(itemInlineTableStart) return lexInlineTableValue - case stringStart: - if lx.accept(stringStart) { - if lx.accept(stringStart) { + case '"': + if lx.accept('"') { + if lx.accept('"') { lx.ignore() // Ignore """ return lexMultilineString } @@ -440,9 +527,9 @@ func lexValue(lx *lexer) stateFn { } lx.ignore() // ignore the '"' return lexString - case rawStringStart: - if lx.accept(rawStringStart) { - if lx.accept(rawStringStart) { + case '\'': + if lx.accept('\'') { + if lx.accept('\'') { lx.ignore() // Ignore """ return lexMultilineRawString } @@ -450,10 +537,15 @@ func lexValue(lx *lexer) stateFn { } lx.ignore() // ignore the "'" return lexRawString - case '+', '-': - return lexNumberStart case '.': // special error case, be kind to users return lx.errorf("floats must start with a digit, not '.'") + case 'i', 'n': + if (lx.accept('n') && lx.accept('f')) || (lx.accept('a') && lx.accept('n')) { + lx.emit(itemFloat) + return lx.pop() + } + case '-', '+': + return lexDecimalNumberStart } if unicode.IsLetter(r) { // Be permissive here; lexBool will give a nice error if the @@ -463,6 +555,9 @@ func lexValue(lx *lexer) stateFn { lx.backup() return lexBool } + if r == eof { + return lx.errorf("unexpected EOF; expected value") + } return lx.errorf("expected value but found %q instead", r) } @@ -473,14 +568,12 @@ func lexArrayValue(lx *lexer) stateFn { switch { case isWhitespace(r) || isNL(r): return lexSkip(lx, lexArrayValue) - case r == commentStart: + case r == '#': lx.push(lexArrayValue) return lexCommentStart - case r == comma: + case r == ',': return lx.errorf("unexpected comma") - case r == arrayEnd: - // NOTE(caleb): The spec isn't clear about whether you can have - // a trailing comma or not, so we'll allow it. + case r == ']': return lexArrayEnd } @@ -493,23 +586,20 @@ func lexArrayValue(lx *lexer) stateFn { // the next value (or the end of the array): it ignores whitespace and newlines // and expects either a ',' or a ']'. 
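
lexValue above also accepts TOML 1.0 float syntax: bare inf/nan literals and explicit '+' or '-' signs. Illustrative sketch (not part of this patch):

    package main

    import (
    	"fmt"

    	"github.com/BurntSushi/toml"
    )

    func main() {
    	var f struct {
    		A float64 `toml:"a"`
    		B float64 `toml:"b"`
    		C float64 `toml:"c"`
    	}
    	if _, err := toml.Decode("a = inf\nb = nan\nc = +0.5", &f); err != nil {
    		panic(err)
    	}
    	fmt.Println(f.A, f.B, f.C) // +Inf NaN 0.5
    }
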
func lexArrayValueEnd(lx *lexer) stateFn { - r := lx.next() - switch { + switch r := lx.next(); { case isWhitespace(r) || isNL(r): return lexSkip(lx, lexArrayValueEnd) - case r == commentStart: + case r == '#': lx.push(lexArrayValueEnd) return lexCommentStart - case r == comma: + case r == ',': lx.ignore() return lexArrayValue // move on to the next value - case r == arrayEnd: + case r == ']': return lexArrayEnd + default: + return lx.errorf("expected a comma (',') or array terminator (']'), but got %s", runeOrEOF(r)) } - return lx.errorf( - "expected a comma or array terminator %q, but got %q instead", - arrayEnd, r, - ) } // lexArrayEnd finishes the lexing of an array. @@ -528,13 +618,13 @@ func lexInlineTableValue(lx *lexer) stateFn { case isWhitespace(r): return lexSkip(lx, lexInlineTableValue) case isNL(r): - return lx.errorf("newlines not allowed within inline tables") - case r == commentStart: + return lx.errorPrevLine(errLexInlineTableNL{}) + case r == '#': lx.push(lexInlineTableValue) return lexCommentStart - case r == comma: + case r == ',': return lx.errorf("unexpected comma") - case r == inlineTableEnd: + case r == '}': return lexInlineTableEnd } lx.backup() @@ -546,23 +636,33 @@ func lexInlineTableValue(lx *lexer) stateFn { // key/value pair and the next pair (or the end of the table): // it ignores whitespace and expects either a ',' or a '}'. func lexInlineTableValueEnd(lx *lexer) stateFn { - r := lx.next() - switch { + switch r := lx.next(); { case isWhitespace(r): return lexSkip(lx, lexInlineTableValueEnd) case isNL(r): - return lx.errorf("newlines not allowed within inline tables") - case r == commentStart: + return lx.errorPrevLine(errLexInlineTableNL{}) + case r == '#': lx.push(lexInlineTableValueEnd) return lexCommentStart - case r == comma: + case r == ',': lx.ignore() + lx.skip(isWhitespace) + if lx.peek() == '}' { + return lx.errorf("trailing comma not allowed in inline tables") + } return lexInlineTableValue - case r == inlineTableEnd: + case r == '}': return lexInlineTableEnd + default: + return lx.errorf("expected a comma or an inline table terminator '}', but got %s instead", runeOrEOF(r)) } - return lx.errorf("expected a comma or an inline table terminator %q, "+ - "but got %q instead", inlineTableEnd, r) +} + +func runeOrEOF(r rune) string { + if r == eof { + return "end of file" + } + return "'" + string(r) + "'" } // lexInlineTableEnd finishes the lexing of an inline table. @@ -579,13 +679,13 @@ func lexString(lx *lexer) stateFn { r := lx.next() switch { case r == eof: - return lx.errorf("unexpected EOF") + return lx.errorf(`unexpected EOF; expected '"'`) case isNL(r): - return lx.errorf("strings cannot contain newlines") + return lx.errorPrevLine(errLexStringNL{}) case r == '\\': lx.push(lexString) return lexStringEscape - case r == stringEnd: + case r == '"': lx.backup() lx.emit(itemString) lx.next() @@ -598,19 +698,47 @@ func lexString(lx *lexer) stateFn { // lexMultilineString consumes the inner contents of a string. It assumes that // the beginning '"""' has already been consumed and ignored. func lexMultilineString(lx *lexer) stateFn { - switch lx.next() { + r := lx.next() + switch r { + default: + return lexMultilineString case eof: - return lx.errorf("unexpected EOF") + return lx.errorf(`unexpected EOF; expected '"""'`) case '\\': return lexMultilineStringEscape - case stringEnd: - if lx.accept(stringEnd) { - if lx.accept(stringEnd) { - lx.backup() + case '"': + /// Found " → try to read two more "". 
+ if lx.accept('"') { + if lx.accept('"') { + /// Peek ahead: the string can contain " and "", including at the + /// end: """str""""" + /// 6 or more at the end, however, is an error. + if lx.peek() == '"' { + /// Check if we already lexed 5 's; if so we have 6 now, and + /// that's just too many man! + /// + /// Second check is for the edge case: + /// + /// two quotes allowed. + /// vv + /// """lol \"""""" + /// ^^ ^^^---- closing three + /// escaped + /// + /// But ugly, but it works + if strings.HasSuffix(lx.current(), `"""""`) && !strings.HasSuffix(lx.current(), `\"""""`) { + return lx.errorf(`unexpected '""""""'`) + } + lx.backup() + lx.backup() + return lexMultilineString + } + + lx.backup() /// backup: don't include the """ in the item. lx.backup() lx.backup() lx.emit(itemMultilineString) - lx.next() + lx.next() /// Read over ''' again and discard it. lx.next() lx.next() lx.ignore() @@ -618,8 +746,8 @@ func lexMultilineString(lx *lexer) stateFn { } lx.backup() } + return lexMultilineString } - return lexMultilineString } // lexRawString consumes a raw string. Nothing can be escaped in such a string. @@ -627,35 +755,54 @@ func lexMultilineString(lx *lexer) stateFn { func lexRawString(lx *lexer) stateFn { r := lx.next() switch { + default: + return lexRawString case r == eof: - return lx.errorf("unexpected EOF") + return lx.errorf(`unexpected EOF; expected "'"`) case isNL(r): - return lx.errorf("strings cannot contain newlines") - case r == rawStringEnd: + return lx.errorPrevLine(errLexStringNL{}) + case r == '\'': lx.backup() lx.emit(itemRawString) lx.next() lx.ignore() return lx.pop() } - return lexRawString } // lexMultilineRawString consumes a raw string. Nothing can be escaped in such -// a string. It assumes that the beginning "'''" has already been consumed and +// a string. It assumes that the beginning ''' has already been consumed and // ignored. func lexMultilineRawString(lx *lexer) stateFn { - switch lx.next() { + r := lx.next() + switch r { + default: + return lexMultilineRawString case eof: - return lx.errorf("unexpected EOF") - case rawStringEnd: - if lx.accept(rawStringEnd) { - if lx.accept(rawStringEnd) { - lx.backup() + return lx.errorf(`unexpected EOF; expected "'''"`) + case '\'': + /// Found ' → try to read two more ''. + if lx.accept('\'') { + if lx.accept('\'') { + /// Peek ahead: the string can contain ' and '', including at the + /// end: '''str''''' + /// 6 or more at the end, however, is an error. + if lx.peek() == '\'' { + /// Check if we already lexed 5 's; if so we have 6 now, and + /// that's just too many man! + if strings.HasSuffix(lx.current(), "'''''") { + return lx.errorf(`unexpected "''''''"`) + } + lx.backup() + lx.backup() + return lexMultilineRawString + } + + lx.backup() /// backup: don't include the ''' in the item. lx.backup() lx.backup() lx.emit(itemRawMultilineString) - lx.next() + lx.next() /// Read over ''' again and discard it. lx.next() lx.next() lx.ignore() @@ -663,15 +810,14 @@ func lexMultilineRawString(lx *lexer) stateFn { } lx.backup() } + return lexMultilineRawString } - return lexMultilineRawString } // lexMultilineStringEscape consumes an escaped character. It assumes that the // preceding '\\' has already been consumed. func lexMultilineStringEscape(lx *lexer) stateFn { - // Handle the special case first: - if isNL(lx.next()) { + if isNL(lx.next()) { /// \ escaping newline. 
return lexMultilineString } lx.backup() @@ -694,6 +840,10 @@ func lexStringEscape(lx *lexer) stateFn { fallthrough case '"': fallthrough + case ' ', '\t': + // Inside """ .. """ strings you can use \ to escape newlines, and any + // amount of whitespace can be between the \ and \n. + fallthrough case '\\': return lx.pop() case 'u': @@ -701,9 +851,7 @@ func lexStringEscape(lx *lexer) stateFn { case 'U': return lexLongUnicodeEscape } - return lx.errorf("invalid escape character %q; only the following "+ - "escape characters are allowed: "+ - `\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r) + return lx.error(errLexEscape{r}) } func lexShortUnicodeEscape(lx *lexer) stateFn { @@ -711,8 +859,9 @@ func lexShortUnicodeEscape(lx *lexer) stateFn { for i := 0; i < 4; i++ { r = lx.next() if !isHexadecimal(r) { - return lx.errorf(`expected four hexadecimal digits after '\u', `+ - "but got %q instead", lx.current()) + return lx.errorf( + `expected four hexadecimal digits after '\u', but got %q instead`, + lx.current()) } } return lx.pop() @@ -723,28 +872,33 @@ func lexLongUnicodeEscape(lx *lexer) stateFn { for i := 0; i < 8; i++ { r = lx.next() if !isHexadecimal(r) { - return lx.errorf(`expected eight hexadecimal digits after '\U', `+ - "but got %q instead", lx.current()) + return lx.errorf( + `expected eight hexadecimal digits after '\U', but got %q instead`, + lx.current()) } } return lx.pop() } -// lexNumberOrDateStart consumes either an integer, a float, or datetime. +// lexNumberOrDateStart processes the first character of a value which begins +// with a digit. It exists to catch values starting with '0', so that +// lexBaseNumberOrDate can differentiate base prefixed integers from other +// types. func lexNumberOrDateStart(lx *lexer) stateFn { r := lx.next() - if isDigit(r) { - return lexNumberOrDate - } switch r { - case '_': - return lexNumber - case 'e', 'E': - return lexFloat - case '.': - return lx.errorf("floats must start with a digit, not '.'") + case '0': + return lexBaseNumberOrDate } - return lx.errorf("expected a digit but got %q", r) + + if !isDigit(r) { + // The only way to reach this state is if the value starts + // with a digit, so specifically treat anything else as an + // error. + return lx.errorf("expected a digit but got %q", r) + } + + return lexNumberOrDate } // lexNumberOrDate consumes either an integer, float or datetime. @@ -754,10 +908,10 @@ func lexNumberOrDate(lx *lexer) stateFn { return lexNumberOrDate } switch r { - case '-': + case '-', ':': return lexDatetime case '_': - return lexNumber + return lexDecimalNumber case '.', 'e', 'E': return lexFloat } @@ -775,41 +929,156 @@ func lexDatetime(lx *lexer) stateFn { return lexDatetime } switch r { - case '-', 'T', ':', '.', 'Z', '+': + case '-', ':', 'T', 't', ' ', '.', 'Z', 'z', '+': return lexDatetime } lx.backup() - lx.emit(itemDatetime) + lx.emitTrim(itemDatetime) return lx.pop() } -// lexNumberStart consumes either an integer or a float. It assumes that a sign -// has already been read, but that *no* digits have been consumed. -// lexNumberStart will move to the appropriate integer or float states. -func lexNumberStart(lx *lexer) stateFn { - // We MUST see a digit. Even floats have to start with a digit. +// lexHexInteger consumes a hexadecimal integer after seeing the '0x' prefix. +func lexHexInteger(lx *lexer) stateFn { r := lx.next() - if !isDigit(r) { - if r == '.' 
{ - return lx.errorf("floats must start with a digit, not '.'") + if isHexadecimal(r) { + return lexHexInteger + } + switch r { + case '_': + return lexHexInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexOctalInteger consumes an octal integer after seeing the '0o' prefix. +func lexOctalInteger(lx *lexer) stateFn { + r := lx.next() + if isOctal(r) { + return lexOctalInteger + } + switch r { + case '_': + return lexOctalInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexBinaryInteger consumes a binary integer after seeing the '0b' prefix. +func lexBinaryInteger(lx *lexer) stateFn { + r := lx.next() + if isBinary(r) { + return lexBinaryInteger + } + switch r { + case '_': + return lexBinaryInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDecimalNumber consumes a decimal float or integer. +func lexDecimalNumber(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexDecimalNumber + } + switch r { + case '.', 'e', 'E': + return lexFloat + case '_': + return lexDecimalNumber + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDecimalNumber consumes the first digit of a number beginning with a sign. +// It assumes the sign has already been consumed. Values which start with a sign +// are only allowed to be decimal integers or floats. +// +// The special "nan" and "inf" values are also recognized. +func lexDecimalNumberStart(lx *lexer) stateFn { + r := lx.next() + + // Special error cases to give users better error messages + switch r { + case 'i': + if !lx.accept('n') || !lx.accept('f') { + return lx.errorf("invalid float: '%s'", lx.current()) } - return lx.errorf("expected a digit but got %q", r) + lx.emit(itemFloat) + return lx.pop() + case 'n': + if !lx.accept('a') || !lx.accept('n') { + return lx.errorf("invalid float: '%s'", lx.current()) + } + lx.emit(itemFloat) + return lx.pop() + case '0': + p := lx.peek() + switch p { + case 'b', 'o', 'x': + return lx.errorf("cannot use sign with non-decimal numbers: '%s%c'", lx.current(), p) + } + case '.': + return lx.errorf("floats must start with a digit, not '.'") } - return lexNumber + + if isDigit(r) { + return lexDecimalNumber + } + + return lx.errorf("expected a digit but got %q", r) } -// lexNumber consumes an integer or a float after seeing the first digit. -func lexNumber(lx *lexer) stateFn { +// lexBaseNumberOrDate differentiates between the possible values which +// start with '0'. It assumes that before reaching this state, the initial '0' +// has been consumed. +func lexBaseNumberOrDate(lx *lexer) stateFn { r := lx.next() + // Note: All datetimes start with at least two digits, so we don't + // handle date characters (':', '-', etc.) here. if isDigit(r) { - return lexNumber + return lexNumberOrDate } switch r { case '_': - return lexNumber + // Can only be decimal, because there can't be an underscore + // between the '0' and the base designator, and dates can't + // contain underscores. 
+ return lexDecimalNumber case '.', 'e', 'E': return lexFloat + case 'b': + r = lx.peek() + if !isBinary(r) { + lx.errorf("not a binary number: '%s%c'", lx.current(), r) + } + return lexBinaryInteger + case 'o': + r = lx.peek() + if !isOctal(r) { + lx.errorf("not an octal number: '%s%c'", lx.current(), r) + } + return lexOctalInteger + case 'x': + r = lx.peek() + if !isHexadecimal(r) { + lx.errorf("not a hexidecimal number: '%s%c'", lx.current(), r) + } + return lexHexInteger } lx.backup() @@ -867,49 +1136,31 @@ func lexCommentStart(lx *lexer) stateFn { // It will consume *up to* the first newline character, and pass control // back to the last state on the stack. func lexComment(lx *lexer) stateFn { - r := lx.peek() - if isNL(r) || r == eof { + switch r := lx.next(); { + case isNL(r) || r == eof: + lx.backup() lx.emit(itemText) return lx.pop() + default: + return lexComment } - lx.next() - return lexComment } // lexSkip ignores all slurped input and moves on to the next state. func lexSkip(lx *lexer, nextState stateFn) stateFn { - return func(lx *lexer) stateFn { - lx.ignore() - return nextState - } -} - -// isWhitespace returns true if `r` is a whitespace character according -// to the spec. -func isWhitespace(r rune) bool { - return r == '\t' || r == ' ' -} - -func isNL(r rune) bool { - return r == '\n' || r == '\r' -} - -func isDigit(r rune) bool { - return r >= '0' && r <= '9' -} - -func isHexadecimal(r rune) bool { - return (r >= '0' && r <= '9') || - (r >= 'a' && r <= 'f') || - (r >= 'A' && r <= 'F') + lx.ignore() + return nextState } -func isBareKeyChar(r rune) bool { - return (r >= 'A' && r <= 'Z') || - (r >= 'a' && r <= 'z') || - (r >= '0' && r <= '9') || - r == '_' || - r == '-' +func (s stateFn) String() string { + name := runtime.FuncForPC(reflect.ValueOf(s).Pointer()).Name() + if i := strings.LastIndexByte(name, '.'); i > -1 { + name = name[i+1:] + } + if s == nil { + name = "" + } + return name + "()" } func (itype itemType) String() string { @@ -938,12 +1189,18 @@ func (itype itemType) String() string { return "TableEnd" case itemKeyStart: return "KeyStart" + case itemKeyEnd: + return "KeyEnd" case itemArray: return "Array" case itemArrayEnd: return "ArrayEnd" case itemCommentStart: return "CommentStart" + case itemInlineTableStart: + return "InlineTableStart" + case itemInlineTableEnd: + return "InlineTableEnd" } panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype))) } @@ -951,3 +1208,26 @@ func (itype itemType) String() string { func (item item) String() string { return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val) } + +func isWhitespace(r rune) bool { return r == '\t' || r == ' ' } +func isNL(r rune) bool { return r == '\n' || r == '\r' } +func isControl(r rune) bool { // Control characters except \t, \r, \n + switch r { + case '\t', '\r', '\n': + return false + default: + return (r >= 0x00 && r <= 0x1f) || r == 0x7f + } +} +func isDigit(r rune) bool { return r >= '0' && r <= '9' } +func isBinary(r rune) bool { return r == '0' || r == '1' } +func isOctal(r rune) bool { return r >= '0' && r <= '7' } +func isHexadecimal(r rune) bool { + return (r >= '0' && r <= '9') || (r >= 'a' && r <= 'f') || (r >= 'A' && r <= 'F') +} +func isBareKeyChar(r rune) bool { + return (r >= 'A' && r <= 'Z') || + (r >= 'a' && r <= 'z') || + (r >= '0' && r <= '9') || + r == '_' || r == '-' +} diff --git a/vendor/github.com/BurntSushi/toml/decode_meta.go b/vendor/github.com/BurntSushi/toml/meta.go similarity index 60% rename from vendor/github.com/BurntSushi/toml/decode_meta.go rename to 
vendor/github.com/BurntSushi/toml/meta.go index b9914a679..71847a041 100644 --- a/vendor/github.com/BurntSushi/toml/decode_meta.go +++ b/vendor/github.com/BurntSushi/toml/meta.go @@ -1,33 +1,40 @@ package toml -import "strings" +import ( + "strings" +) -// MetaData allows access to meta information about TOML data that may not -// be inferrable via reflection. In particular, whether a key has been defined -// and the TOML type of a key. +// MetaData allows access to meta information about TOML data that's not +// accessible otherwise. +// +// It allows checking if a key is defined in the TOML data, whether any keys +// were undecoded, and the TOML type of a key. type MetaData struct { + context Key // Used only during decoding. + + keyInfo map[string]keyInfo mapping map[string]interface{} - types map[string]tomlType keys []Key - decoded map[string]bool - context Key // Used only during decoding. + decoded map[string]struct{} + data []byte // Input file; for errors. } -// IsDefined returns true if the key given exists in the TOML data. The key -// should be specified hierarchially. e.g., +// IsDefined reports if the key exists in the TOML data. // -// // access the TOML key 'a.b.c' -// IsDefined("a", "b", "c") +// The key should be specified hierarchically, for example to access the TOML +// key "a.b.c" you would use IsDefined("a", "b", "c"). Keys are case sensitive. // -// IsDefined will return false if an empty key given. Keys are case sensitive. +// Returns false for an empty key. func (md *MetaData) IsDefined(key ...string) bool { if len(key) == 0 { return false } - var hash map[string]interface{} - var ok bool - var hashOrVal interface{} = md.mapping + var ( + hash map[string]interface{} + ok bool + hashOrVal interface{} = md.mapping + ) for _, k := range key { if hash, ok = hashOrVal.(map[string]interface{}); !ok { return false @@ -41,58 +48,20 @@ func (md *MetaData) IsDefined(key ...string) bool { // Type returns a string representation of the type of the key specified. // -// Type will return the empty string if given an empty key or a key that -// does not exist. Keys are case sensitive. +// Type will return the empty string if given an empty key or a key that does +// not exist. Keys are case sensitive. func (md *MetaData) Type(key ...string) string { - fullkey := strings.Join(key, ".") - if typ, ok := md.types[fullkey]; ok { - return typ.typeString() + if ki, ok := md.keyInfo[Key(key).String()]; ok { + return ki.tomlType.typeString() } return "" } -// Key is the type of any TOML key, including key groups. Use (MetaData).Keys -// to get values of this type. -type Key []string - -func (k Key) String() string { - return strings.Join(k, ".") -} - -func (k Key) maybeQuotedAll() string { - var ss []string - for i := range k { - ss = append(ss, k.maybeQuoted(i)) - } - return strings.Join(ss, ".") -} - -func (k Key) maybeQuoted(i int) string { - quote := false - for _, c := range k[i] { - if !isBareKeyChar(c) { - quote = true - break - } - } - if quote { - return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\"" - } - return k[i] -} - -func (k Key) add(piece string) Key { - newKey := make(Key, len(k)+1) - copy(newKey, k) - newKey[len(k)] = piece - return newKey -} - // Keys returns a slice of every key in the TOML data, including key groups. -// Each key is itself a slice, where the first element is the top of the -// hierarchy and the last is the most specific. // -// The list will have the same order as the keys appeared in the TOML data. 
+// Each key is itself a slice, where the first element is the top of the +// hierarchy and the last is the most specific. The list will have the same +// order as the keys appeared in the TOML data. // // All keys returned are non-empty. func (md *MetaData) Keys() []Key { @@ -102,7 +71,7 @@ func (md *MetaData) Keys() []Key { // Undecoded returns all keys that have not been decoded in the order in which // they appear in the original TOML document. // -// This includes keys that haven't been decoded because of a Primitive value. +// This includes keys that haven't been decoded because of a [Primitive] value. // Once the Primitive value is decoded, the keys will be considered decoded. // // Also note that decoding into an empty interface will result in no decoding, @@ -113,9 +82,40 @@ func (md *MetaData) Keys() []Key { func (md *MetaData) Undecoded() []Key { undecoded := make([]Key, 0, len(md.keys)) for _, key := range md.keys { - if !md.decoded[key.String()] { + if _, ok := md.decoded[key.String()]; !ok { undecoded = append(undecoded, key) } } return undecoded } + +// Key represents any TOML key, including key groups. Use [MetaData.Keys] to get +// values of this type. +type Key []string + +func (k Key) String() string { + ss := make([]string, len(k)) + for i := range k { + ss[i] = k.maybeQuoted(i) + } + return strings.Join(ss, ".") +} + +func (k Key) maybeQuoted(i int) string { + if k[i] == "" { + return `""` + } + for _, c := range k[i] { + if !isBareKeyChar(c) { + return `"` + dblQuotedReplacer.Replace(k[i]) + `"` + } + } + return k[i] +} + +func (k Key) add(piece string) Key { + newKey := make(Key, len(k)+1) + copy(newKey, k) + newKey[len(k)] = piece + return newKey +} diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go index 50869ef92..d2542d6f9 100644 --- a/vendor/github.com/BurntSushi/toml/parse.go +++ b/vendor/github.com/BurntSushi/toml/parse.go @@ -5,54 +5,69 @@ import ( "strconv" "strings" "time" - "unicode" "unicode/utf8" + + "github.com/BurntSushi/toml/internal" ) type parser struct { - mapping map[string]interface{} - types map[string]tomlType - lx *lexer - - // A list of keys in the order that they appear in the TOML data. - ordered []Key - - // the full key for the current hash in scope - context Key + lx *lexer + context Key // Full key for the current hash in scope. + currentKey string // Base key name for everything except hashes. + pos Position // Current position in the TOML file. - // the base key name for everything except hashes - currentKey string + ordered []Key // List of keys in the order that they appear in the TOML data. - // rough approximation of line number - approxLine int - - // A map of 'key.group.names' to whether they were created implicitly. - implicits map[string]bool + keyInfo map[string]keyInfo // Map keyname → info about the TOML key. + mapping map[string]interface{} // Map keyname → key value. + implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names"). } -type parseError string - -func (pe parseError) Error() string { - return string(pe) +type keyInfo struct { + pos Position + tomlType tomlType } func parse(data string) (p *parser, err error) { defer func() { if r := recover(); r != nil { - var ok bool - if err, ok = r.(parseError); ok { + if pErr, ok := r.(ParseError); ok { + pErr.input = data + err = pErr return } panic(r) } }() + // Read over BOM; do this here as the lexer calls utf8.DecodeRuneInString() + // which mangles stuff. 
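The BOM strip here and the NULL-byte check that follows both run before the lexer ever sees the input. A standalone sketch of the same two checks; preScan is a hypothetical name, not part of the package.

package main

import (
	"fmt"
	"strings"
)

// preScan strips a UTF-16 byte-order mark and rejects early NULL bytes,
// which almost always indicate UTF-16 rather than UTF-8 input.
func preScan(data string) (string, error) {
	if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") {
		data = data[2:]
	}
	ex := 6
	if len(data) < ex {
		ex = len(data)
	}
	if i := strings.IndexRune(data[:ex], 0); i > -1 {
		return "", fmt.Errorf("NULL byte at offset %d; input is probably UTF-16, not UTF-8", i)
	}
	return data, nil
}

func main() {
	if _, err := preScan("\xfe\xff\x00k\x00e\x00y"); err != nil {
		fmt.Println(err) // NULL byte at offset 0; ...
	}
}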
+ if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { + data = data[2:] + } + + // Examine first few bytes for NULL bytes; this probably means it's a UTF-16 + // file (second byte in surrogate pair being NULL). Again, do this here to + // avoid having to deal with UTF-8/16 stuff in the lexer. + ex := 6 + if len(data) < 6 { + ex = len(data) + } + if i := strings.IndexRune(data[:ex], 0); i > -1 { + return nil, ParseError{ + Message: "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8", + Position: Position{Line: 1, Start: i, Len: 1}, + Line: 1, + input: data, + } + } + p = &parser{ + keyInfo: make(map[string]keyInfo), mapping: make(map[string]interface{}), - types: make(map[string]tomlType), lx: lex(data), ordered: make([]Key, 0), - implicits: make(map[string]bool), + implicits: make(map[string]struct{}), } for { item := p.next() @@ -65,20 +80,57 @@ func parse(data string) (p *parser, err error) { return p, nil } +func (p *parser) panicErr(it item, err error) { + panic(ParseError{ + err: err, + Position: it.pos, + Line: it.pos.Len, + LastKey: p.current(), + }) +} + +func (p *parser) panicItemf(it item, format string, v ...interface{}) { + panic(ParseError{ + Message: fmt.Sprintf(format, v...), + Position: it.pos, + Line: it.pos.Len, + LastKey: p.current(), + }) +} + func (p *parser) panicf(format string, v ...interface{}) { - msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s", - p.approxLine, p.current(), fmt.Sprintf(format, v...)) - panic(parseError(msg)) + panic(ParseError{ + Message: fmt.Sprintf(format, v...), + Position: p.pos, + Line: p.pos.Line, + LastKey: p.current(), + }) } func (p *parser) next() item { it := p.lx.nextItem() + //fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.pos.Line, it.val) if it.typ == itemError { - p.panicf("%s", it.val) + if it.err != nil { + panic(ParseError{ + Position: it.pos, + Line: it.pos.Line, + LastKey: p.current(), + err: it.err, + }) + } + + p.panicItemf(it, "%s", it.val) } return it } +func (p *parser) nextPos() item { + it := p.next() + p.pos = it.pos + return it +} + func (p *parser) bug(format string, v ...interface{}) { panic(fmt.Sprintf("BUG: "+format+"\n\n", v...)) } @@ -97,44 +149,60 @@ func (p *parser) assertEqual(expected, got itemType) { func (p *parser) topLevel(item item) { switch item.typ { - case itemCommentStart: - p.approxLine = item.line + case itemCommentStart: // # .. p.expect(itemText) - case itemTableStart: - kg := p.next() - p.approxLine = kg.line + case itemTableStart: // [ .. ] + name := p.nextPos() var key Key - for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() { - key = append(key, p.keyString(kg)) + for ; name.typ != itemTableEnd && name.typ != itemEOF; name = p.next() { + key = append(key, p.keyString(name)) } - p.assertEqual(itemTableEnd, kg.typ) + p.assertEqual(itemTableEnd, name.typ) - p.establishContext(key, false) - p.setType("", tomlHash) + p.addContext(key, false) + p.setType("", tomlHash, item.pos) p.ordered = append(p.ordered, key) - case itemArrayTableStart: - kg := p.next() - p.approxLine = kg.line + case itemArrayTableStart: // [[ .. 
]] + name := p.nextPos() var key Key - for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() { - key = append(key, p.keyString(kg)) + for ; name.typ != itemArrayTableEnd && name.typ != itemEOF; name = p.next() { + key = append(key, p.keyString(name)) } - p.assertEqual(itemArrayTableEnd, kg.typ) + p.assertEqual(itemArrayTableEnd, name.typ) - p.establishContext(key, true) - p.setType("", tomlArrayHash) + p.addContext(key, true) + p.setType("", tomlArrayHash, item.pos) p.ordered = append(p.ordered, key) - case itemKeyStart: - kname := p.next() - p.approxLine = kname.line - p.currentKey = p.keyString(kname) - - val, typ := p.value(p.next()) - p.setValue(p.currentKey, val) - p.setType(p.currentKey, typ) + case itemKeyStart: // key = .. + outerContext := p.context + /// Read all the key parts (e.g. 'a' and 'b' in 'a.b') + k := p.nextPos() + var key Key + for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() { + key = append(key, p.keyString(k)) + } + p.assertEqual(itemKeyEnd, k.typ) + + /// The current key is the last part. + p.currentKey = key[len(key)-1] + + /// All the other parts (if any) are the context; need to set each part + /// as implicit. + context := key[:len(key)-1] + for i := range context { + p.addImplicitContext(append(p.context, context[i:i+1]...)) + } + + /// Set value. + vItem := p.next() + val, typ := p.value(vItem, false) + p.set(p.currentKey, val, typ, vItem.pos) p.ordered = append(p.ordered, p.context.add(p.currentKey)) + + /// Remove the context we added (preserving any context from [tbl] lines). + p.context = outerContext p.currentKey = "" default: p.bug("Unexpected type at top level: %s", item.typ) @@ -148,180 +216,261 @@ func (p *parser) keyString(it item) string { return it.val case itemString, itemMultilineString, itemRawString, itemRawMultilineString: - s, _ := p.value(it) + s, _ := p.value(it, false) return s.(string) default: p.bug("Unexpected key type: %s", it.typ) - panic("unreachable") } + panic("unreachable") } +var datetimeRepl = strings.NewReplacer( + "z", "Z", + "t", "T", + " ", "T") + // value translates an expected value from the lexer into a Go value wrapped // as an empty interface. -func (p *parser) value(it item) (interface{}, tomlType) { +func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) { switch it.typ { case itemString: - return p.replaceEscapes(it.val), p.typeOfPrimitive(it) + return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it) case itemMultilineString: - trimmed := stripFirstNewline(stripEscapedWhitespace(it.val)) - return p.replaceEscapes(trimmed), p.typeOfPrimitive(it) + return p.replaceEscapes(it, stripFirstNewline(p.stripEscapedNewlines(it.val))), p.typeOfPrimitive(it) case itemRawString: return it.val, p.typeOfPrimitive(it) case itemRawMultilineString: return stripFirstNewline(it.val), p.typeOfPrimitive(it) + case itemInteger: + return p.valueInteger(it) + case itemFloat: + return p.valueFloat(it) case itemBool: switch it.val { case "true": return true, p.typeOfPrimitive(it) case "false": return false, p.typeOfPrimitive(it) + default: + p.bug("Expected boolean value, but got '%s'.", it.val) } - p.bug("Expected boolean value, but got '%s'.", it.val) - case itemInteger: - if !numUnderscoresOK(it.val) { - p.panicf("Invalid integer %q: underscores must be surrounded by digits", - it.val) - } - val := strings.Replace(it.val, "_", "", -1) - num, err := strconv.ParseInt(val, 10, 64) - if err != nil { - // Distinguish integer values. 
Normally, it'd be a bug if the lexer - // provides an invalid integer, but it's possible that the number is - // out of range of valid values (which the lexer cannot determine). - // So mark the former as a bug but the latter as a legitimate user - // error. - if e, ok := err.(*strconv.NumError); ok && - e.Err == strconv.ErrRange { - - p.panicf("Integer '%s' is out of the range of 64-bit "+ - "signed integers.", it.val) - } else { - p.bug("Expected integer value, but got '%s'.", it.val) - } + case itemDatetime: + return p.valueDatetime(it) + case itemArray: + return p.valueArray(it) + case itemInlineTableStart: + return p.valueInlineTable(it, parentIsArray) + default: + p.bug("Unexpected value type: %s", it.typ) + } + panic("unreachable") +} + +func (p *parser) valueInteger(it item) (interface{}, tomlType) { + if !numUnderscoresOK(it.val) { + p.panicItemf(it, "Invalid integer %q: underscores must be surrounded by digits", it.val) + } + if numHasLeadingZero(it.val) { + p.panicItemf(it, "Invalid integer %q: cannot have leading zeroes", it.val) + } + + num, err := strconv.ParseInt(it.val, 0, 64) + if err != nil { + // Distinguish integer values. Normally, it'd be a bug if the lexer + // provides an invalid integer, but it's possible that the number is + // out of range of valid values (which the lexer cannot determine). + // So mark the former as a bug but the latter as a legitimate user + // error. + if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { + p.panicErr(it, errParseRange{i: it.val, size: "int64"}) + } else { + p.bug("Expected integer value, but got '%s'.", it.val) } - return num, p.typeOfPrimitive(it) - case itemFloat: - parts := strings.FieldsFunc(it.val, func(r rune) bool { - switch r { - case '.', 'e', 'E': - return true - } - return false - }) - for _, part := range parts { - if !numUnderscoresOK(part) { - p.panicf("Invalid float %q: underscores must be "+ - "surrounded by digits", it.val) - } + } + return num, p.typeOfPrimitive(it) +} + +func (p *parser) valueFloat(it item) (interface{}, tomlType) { + parts := strings.FieldsFunc(it.val, func(r rune) bool { + switch r { + case '.', 'e', 'E': + return true } - if !numPeriodsOK(it.val) { - // As a special case, numbers like '123.' or '1.e2', - // which are valid as far as Go/strconv are concerned, - // must be rejected because TOML says that a fractional - // part consists of '.' followed by 1+ digits. - p.panicf("Invalid float %q: '.' 
must be followed "+ - "by one or more digits", it.val) - } - val := strings.Replace(it.val, "_", "", -1) - num, err := strconv.ParseFloat(val, 64) - if err != nil { - if e, ok := err.(*strconv.NumError); ok && - e.Err == strconv.ErrRange { - - p.panicf("Float '%s' is out of the range of 64-bit "+ - "IEEE-754 floating-point numbers.", it.val) - } else { - p.panicf("Invalid float value: %q", it.val) - } + return false + }) + for _, part := range parts { + if !numUnderscoresOK(part) { + p.panicItemf(it, "Invalid float %q: underscores must be surrounded by digits", it.val) } - return num, p.typeOfPrimitive(it) - case itemDatetime: - var t time.Time - var ok bool - var err error - for _, format := range []string{ - "2006-01-02T15:04:05Z07:00", - "2006-01-02T15:04:05", - "2006-01-02", - } { - t, err = time.ParseInLocation(format, it.val, time.Local) - if err == nil { - ok = true - break - } + } + if len(parts) > 0 && numHasLeadingZero(parts[0]) { + p.panicItemf(it, "Invalid float %q: cannot have leading zeroes", it.val) + } + if !numPeriodsOK(it.val) { + // As a special case, numbers like '123.' or '1.e2', + // which are valid as far as Go/strconv are concerned, + // must be rejected because TOML says that a fractional + // part consists of '.' followed by 1+ digits. + p.panicItemf(it, "Invalid float %q: '.' must be followed by one or more digits", it.val) + } + val := strings.Replace(it.val, "_", "", -1) + if val == "+nan" || val == "-nan" { // Go doesn't support this, but TOML spec does. + val = "nan" + } + num, err := strconv.ParseFloat(val, 64) + if err != nil { + if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { + p.panicErr(it, errParseRange{i: it.val, size: "float64"}) + } else { + p.panicItemf(it, "Invalid float value: %q", it.val) } - if !ok { - p.panicf("Invalid TOML Datetime: %q.", it.val) + } + return num, p.typeOfPrimitive(it) +} + +var dtTypes = []struct { + fmt string + zone *time.Location +}{ + {time.RFC3339Nano, time.Local}, + {"2006-01-02T15:04:05.999999999", internal.LocalDatetime}, + {"2006-01-02", internal.LocalDate}, + {"15:04:05.999999999", internal.LocalTime}, +} + +func (p *parser) valueDatetime(it item) (interface{}, tomlType) { + it.val = datetimeRepl.Replace(it.val) + var ( + t time.Time + ok bool + err error + ) + for _, dt := range dtTypes { + t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone) + if err == nil { + ok = true + break } - return t, p.typeOfPrimitive(it) - case itemArray: - array := make([]interface{}, 0) - types := make([]tomlType, 0) + } + if !ok { + p.panicItemf(it, "Invalid TOML Datetime: %q.", it.val) + } + return t, p.typeOfPrimitive(it) +} - for it = p.next(); it.typ != itemArrayEnd; it = p.next() { - if it.typ == itemCommentStart { - p.expect(itemText) - continue - } +func (p *parser) valueArray(it item) (interface{}, tomlType) { + p.setType(p.currentKey, tomlArray, it.pos) - val, typ := p.value(it) - array = append(array, val) - types = append(types, typ) + var ( + types []tomlType + + // Initialize to a non-nil empty slice. This makes it consistent with + // how S = [] decodes into a non-nil slice inside something like struct + // { S []string }. 
See #338 + array = []interface{}{} + ) + for it = p.next(); it.typ != itemArrayEnd; it = p.next() { + if it.typ == itemCommentStart { + p.expect(itemText) + continue } - return array, p.typeOfArray(types) - case itemInlineTableStart: - var ( - hash = make(map[string]interface{}) - outerContext = p.context - outerKey = p.currentKey - ) - p.context = append(p.context, p.currentKey) - p.currentKey = "" - for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() { - if it.typ != itemKeyStart { - p.bug("Expected key start but instead found %q, around line %d", - it.val, p.approxLine) - } - if it.typ == itemCommentStart { - p.expect(itemText) - continue - } + val, typ := p.value(it, true) + array = append(array, val) + types = append(types, typ) - // retrieve key - k := p.next() - p.approxLine = k.line - kname := p.keyString(k) + // XXX: types isn't used here, we need it to record the accurate type + // information. + // + // Not entirely sure how to best store this; could use "key[0]", + // "key[1]" notation, or maybe store it on the Array type? + } + return array, tomlArray +} + +func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tomlType) { + var ( + hash = make(map[string]interface{}) + outerContext = p.context + outerKey = p.currentKey + ) + + p.context = append(p.context, p.currentKey) + prevContext := p.context + p.currentKey = "" + + p.addImplicit(p.context) + p.addContext(p.context, parentIsArray) - // retrieve value - p.currentKey = kname - val, typ := p.value(p.next()) - // make sure we keep metadata up to date - p.setType(kname, typ) - p.ordered = append(p.ordered, p.context.add(p.currentKey)) - hash[kname] = val + /// Loop over all table key/value pairs. + for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() { + if it.typ == itemCommentStart { + p.expect(itemText) + continue } - p.context = outerContext - p.currentKey = outerKey - return hash, tomlHash + + /// Read all key parts. + k := p.nextPos() + var key Key + for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() { + key = append(key, p.keyString(k)) + } + p.assertEqual(itemKeyEnd, k.typ) + + /// The current key is the last part. + p.currentKey = key[len(key)-1] + + /// All the other parts (if any) are the context; need to set each part + /// as implicit. + context := key[:len(key)-1] + for i := range context { + p.addImplicitContext(append(p.context, context[i:i+1]...)) + } + + /// Set the value. + val, typ := p.value(p.next(), false) + p.set(p.currentKey, val, typ, it.pos) + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + hash[p.currentKey] = val + + /// Restore context. + p.context = prevContext } - p.bug("Unexpected value type: %s", it.typ) - panic("unreachable") + p.context = outerContext + p.currentKey = outerKey + return hash, tomlHash +} + +// numHasLeadingZero checks if this number has leading zeroes, allowing for '0', +// +/- signs, and base prefixes. +func numHasLeadingZero(s string) bool { + if len(s) > 1 && s[0] == '0' && !(s[1] == 'b' || s[1] == 'o' || s[1] == 'x') { // Allow 0b, 0o, 0x + return true + } + if len(s) > 2 && (s[0] == '-' || s[0] == '+') && s[1] == '0' { + return true + } + return false } // numUnderscoresOK checks whether each underscore in s is surrounded by // characters that are not underscores. 
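The helper below enforces TOML's rule that underscores in numbers must sit between digits (hex digits, for 0x literals). A quick illustration through the public Decode API:

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v map[string]interface{}

	// Underscores between digits are fine, including in hex literals.
	if _, err := toml.Decode("a = 1_000\nb = 0xdead_beef", &v); err != nil {
		panic(err)
	}
	fmt.Println(v["a"], v["b"]) // 1000 3735928559

	// Leading, trailing, or doubled underscores are rejected.
	for _, bad := range []string{"c = 1000_", "d = 1__0"} {
		if _, err := toml.Decode(bad, &v); err != nil {
			fmt.Println("rejected:", bad)
		}
	}
}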
func numUnderscoresOK(s string) bool { + switch s { + case "nan", "+nan", "-nan", "inf", "-inf", "+inf": + return true + } accept := false for _, r := range s { if r == '_' { if !accept { return false } - accept = false - continue } - accept = true + + // isHexadecimal is a superset of all the permissable characters + // surrounding an underscore. + accept = isHexadecimal(r) } return accept } @@ -338,13 +487,12 @@ func numPeriodsOK(s string) bool { return !period } -// establishContext sets the current context of the parser, -// where the context is either a hash or an array of hashes. Which one is -// set depends on the value of the `array` parameter. +// Set the current context of the parser, where the context is either a hash or +// an array of hashes, depending on the value of the `array` parameter. // // Establishing the context also makes sure that the key isn't a duplicate, and // will create implicit hashes automatically. -func (p *parser) establishContext(key Key, array bool) { +func (p *parser) addContext(key Key, array bool) { var ok bool // Always start at the top level and drill down for our context. @@ -383,7 +531,7 @@ func (p *parser) establishContext(key Key, array bool) { // list of tables for it. k := key[len(key)-1] if _, ok := hashContext[k]; !ok { - hashContext[k] = make([]map[string]interface{}, 0, 5) + hashContext[k] = make([]map[string]interface{}, 0, 4) } // Add a new table. But make sure the key hasn't already been used @@ -391,8 +539,7 @@ func (p *parser) establishContext(key Key, array bool) { if hash, ok := hashContext[k].([]map[string]interface{}); ok { hashContext[k] = append(hash, make(map[string]interface{})) } else { - p.panicf("Key '%s' was already created and cannot be used as "+ - "an array.", keyContext) + p.panicf("Key '%s' was already created and cannot be used as an array.", key) } } else { p.setValue(key[len(key)-1], make(map[string]interface{})) @@ -400,15 +547,23 @@ func (p *parser) establishContext(key Key, array bool) { p.context = append(p.context, key[len(key)-1]) } +// set calls setValue and setType. +func (p *parser) set(key string, val interface{}, typ tomlType, pos Position) { + p.setValue(key, val) + p.setType(key, typ, pos) + +} + // setValue sets the given key to the given value in the current context. // It will make sure that the key hasn't already been defined, account for // implicit key groups. func (p *parser) setValue(key string, value interface{}) { - var tmpHash interface{} - var ok bool - - hash := p.mapping - keyContext := make(Key, 0) + var ( + tmpHash interface{} + ok bool + hash = p.mapping + keyContext Key + ) for _, k := range p.context { keyContext = append(keyContext, k) if tmpHash, ok = hash[k]; !ok { @@ -422,24 +577,26 @@ func (p *parser) setValue(key string, value interface{}) { case map[string]interface{}: hash = t default: - p.bug("Expected hash to have type 'map[string]interface{}', but "+ - "it has '%T' instead.", tmpHash) + p.panicf("Key '%s' has already been defined.", keyContext) } } keyContext = append(keyContext, key) if _, ok := hash[key]; ok { - // Typically, if the given key has already been set, then we have - // to raise an error since duplicate keys are disallowed. However, - // it's possible that a key was previously defined implicitly. In this - // case, it is allowed to be redefined concretely. (See the - // `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.) 
+ // Normally redefining keys isn't allowed, but the key could have been + // defined implicitly and it's allowed to be redefined concretely. (See + // the `valid/implicit-and-explicit-after.toml` in toml-test) // // But we have to make sure to stop marking it as an implicit. (So that // another redefinition provokes an error.) // // Note that since it has already been defined (as a hash), we don't // want to overwrite it. So our business is done. + if p.isArray(keyContext) { + p.removeImplicit(keyContext) + hash[key] = value + return + } if p.isImplicit(keyContext) { p.removeImplicit(keyContext) return @@ -449,40 +606,39 @@ func (p *parser) setValue(key string, value interface{}) { // key, which is *always* wrong. p.panicf("Key '%s' has already been defined.", keyContext) } + hash[key] = value } -// setType sets the type of a particular value at a given key. -// It should be called immediately AFTER setValue. +// setType sets the type of a particular value at a given key. It should be +// called immediately AFTER setValue. // // Note that if `key` is empty, then the type given will be applied to the // current context (which is either a table or an array of tables). -func (p *parser) setType(key string, typ tomlType) { +func (p *parser) setType(key string, typ tomlType, pos Position) { keyContext := make(Key, 0, len(p.context)+1) - for _, k := range p.context { - keyContext = append(keyContext, k) - } + keyContext = append(keyContext, p.context...) if len(key) > 0 { // allow type setting for hashes keyContext = append(keyContext, key) } - p.types[keyContext.String()] = typ -} - -// addImplicit sets the given Key as having been created implicitly. -func (p *parser) addImplicit(key Key) { - p.implicits[key.String()] = true -} - -// removeImplicit stops tagging the given key as having been implicitly -// created. -func (p *parser) removeImplicit(key Key) { - p.implicits[key.String()] = false + // Special case to make empty keys ("" = 1) work. + // Without it it will set "" rather than `""`. + // TODO: why is this needed? And why is this only needed here? + if len(keyContext) == 0 { + keyContext = Key{""} + } + p.keyInfo[keyContext.String()] = keyInfo{tomlType: typ, pos: pos} } -// isImplicit returns true if the key group pointed to by the key was created -// implicitly. -func (p *parser) isImplicit(key Key) bool { - return p.implicits[key.String()] +// Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and +// "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly). +func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}{} } +func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) } +func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok } +func (p *parser) isArray(key Key) bool { return p.keyInfo[key.String()].tomlType == tomlArray } +func (p *parser) addImplicitContext(key Key) { + p.addImplicit(key) + p.addContext(key, false) } // current returns the full key name of the current context. 
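The implicit-key bookkeeping above (addImplicit, removeImplicit, addImplicitContext) is what lets a table that was created implicitly — by a dotted key or an [a.b] header — be defined explicitly later, while genuine duplicates are still rejected. A usage sketch with the public API:

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v map[string]interface{}

	// "[a.b]" creates "a" implicitly; defining "[a]" afterwards is legal
	// (the implicit-and-explicit-after case referenced above).
	ok := "[a.b]\nx = 1\n[a]\ny = 2"
	if _, err := toml.Decode(ok, &v); err != nil {
		panic(err)
	}
	fmt.Println(v) // map[a:map[b:map[x:1] y:2]]

	// Defining the same table explicitly twice is an error.
	dup := "[a]\nx = 1\n[a]\ny = 2"
	if _, err := toml.Decode(dup, &v); err != nil {
		fmt.Println("error:", err)
	}
}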
@@ -497,24 +653,62 @@ func (p *parser) current() string { } func stripFirstNewline(s string) string { - if len(s) == 0 || s[0] != '\n' { - return s + if len(s) > 0 && s[0] == '\n' { + return s[1:] } - return s[1:] + if len(s) > 1 && s[0] == '\r' && s[1] == '\n' { + return s[2:] + } + return s } -func stripEscapedWhitespace(s string) string { - esc := strings.Split(s, "\\\n") - if len(esc) > 1 { - for i := 1; i < len(esc); i++ { - esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace) +// Remove newlines inside triple-quoted strings if a line ends with "\". +func (p *parser) stripEscapedNewlines(s string) string { + split := strings.Split(s, "\n") + if len(split) < 1 { + return s + } + + escNL := false // Keep track of the last non-blank line was escaped. + for i, line := range split { + line = strings.TrimRight(line, " \t\r") + + if len(line) == 0 || line[len(line)-1] != '\\' { + split[i] = strings.TrimRight(split[i], "\r") + if !escNL && i != len(split)-1 { + split[i] += "\n" + } + continue + } + + escBS := true + for j := len(line) - 1; j >= 0 && line[j] == '\\'; j-- { + escBS = !escBS + } + if escNL { + line = strings.TrimLeft(line, " \t\r") + } + escNL = !escBS + + if escBS { + split[i] += "\n" + continue + } + + if i == len(split)-1 { + p.panicf("invalid escape: '\\ '") + } + + split[i] = line[:len(line)-1] // Remove \ + if len(split)-1 > i { + split[i+1] = strings.TrimLeft(split[i+1], " \t\r") } } - return strings.Join(esc, "") + return strings.Join(split, "") } -func (p *parser) replaceEscapes(str string) string { - var replaced []rune +func (p *parser) replaceEscapes(it item, str string) string { + replaced := make([]rune, 0, len(str)) s := []byte(str) r := 0 for r < len(s) { @@ -532,7 +726,8 @@ func (p *parser) replaceEscapes(str string) string { switch s[r] { default: p.bug("Expected valid escape code after \\, but got %q.", s[r]) - return "" + case ' ', '\t': + p.panicItemf(it, "invalid escape: '\\%c'", s[r]) case 'b': replaced = append(replaced, rune(0x0008)) r += 1 @@ -558,14 +753,14 @@ func (p *parser) replaceEscapes(str string) string { // At this point, we know we have a Unicode escape of the form // `uXXXX` at [r, r+5). (Because the lexer guarantees this // for us.) - escaped := p.asciiEscapeToUnicode(s[r+1 : r+5]) + escaped := p.asciiEscapeToUnicode(it, s[r+1:r+5]) replaced = append(replaced, escaped) r += 5 case 'U': // At this point, we know we have a Unicode escape of the form // `uXXXX` at [r, r+9). (Because the lexer guarantees this // for us.) 
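asciiEscapeToUnicode (just below) converts those hex digits into a rune and checks that the result is a valid code point. A minimal standalone version of the same parse-and-validate step; hexEscapeToRune is a hypothetical name.

package main

import (
	"fmt"
	"strconv"
	"unicode/utf8"
)

// hexEscapeToRune parses the digits after \u or \U and rejects values that
// are not valid UTF-8 code points (e.g. surrogate halves).
func hexEscapeToRune(hexDigits string) (rune, error) {
	n, err := strconv.ParseUint(hexDigits, 16, 32)
	if err != nil {
		return 0, err
	}
	if !utf8.ValidRune(rune(n)) {
		return 0, fmt.Errorf(`escaped character '\u%s' is not valid UTF-8`, hexDigits)
	}
	return rune(n), nil
}

func main() {
	r, _ := hexEscapeToRune("00e9")
	fmt.Printf("%c\n", r) // é
	if _, err := hexEscapeToRune("dfff"); err != nil {
		fmt.Println(err) // surrogate half: not a valid rune
	}
}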
- escaped := p.asciiEscapeToUnicode(s[r+1 : r+9]) + escaped := p.asciiEscapeToUnicode(it, s[r+1:r+9]) replaced = append(replaced, escaped) r += 9 } @@ -573,20 +768,14 @@ func (p *parser) replaceEscapes(str string) string { return string(replaced) } -func (p *parser) asciiEscapeToUnicode(bs []byte) rune { +func (p *parser) asciiEscapeToUnicode(it item, bs []byte) rune { s := string(bs) hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32) if err != nil { - p.bug("Could not parse '%s' as a hexadecimal number, but the "+ - "lexer claims it's OK: %s", s, err) + p.bug("Could not parse '%s' as a hexadecimal number, but the lexer claims it's OK: %s", s, err) } if !utf8.ValidRune(rune(hex)) { - p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s) + p.panicItemf(it, "Escaped character '\\u%s' is not valid UTF-8.", s) } return rune(hex) } - -func isStringType(ty itemType) bool { - return ty == itemString || ty == itemMultilineString || - ty == itemRawString || ty == itemRawMultilineString -} diff --git a/vendor/github.com/BurntSushi/toml/session.vim b/vendor/github.com/BurntSushi/toml/session.vim deleted file mode 100644 index 562164be0..000000000 --- a/vendor/github.com/BurntSushi/toml/session.vim +++ /dev/null @@ -1 +0,0 @@ -au BufWritePost *.go silent!make tags > /dev/null 2>&1 diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go index 608997c22..254ca82e5 100644 --- a/vendor/github.com/BurntSushi/toml/type_fields.go +++ b/vendor/github.com/BurntSushi/toml/type_fields.go @@ -70,8 +70,8 @@ func typeFields(t reflect.Type) []field { next := []field{{typ: t}} // Count of queued names for current level and the next. - count := map[reflect.Type]int{} - nextCount := map[reflect.Type]int{} + var count map[reflect.Type]int + var nextCount map[reflect.Type]int // Types already visited at an earlier level. visited := map[reflect.Type]bool{} diff --git a/vendor/github.com/BurntSushi/toml/type_check.go b/vendor/github.com/BurntSushi/toml/type_toml.go similarity index 75% rename from vendor/github.com/BurntSushi/toml/type_check.go rename to vendor/github.com/BurntSushi/toml/type_toml.go index c73f8afc1..4e90d7737 100644 --- a/vendor/github.com/BurntSushi/toml/type_check.go +++ b/vendor/github.com/BurntSushi/toml/type_toml.go @@ -16,7 +16,7 @@ func typeEqual(t1, t2 tomlType) bool { return t1.typeString() == t2.typeString() } -func typeIsHash(t tomlType) bool { +func typeIsTable(t tomlType) bool { return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash) } @@ -68,24 +68,3 @@ func (p *parser) typeOfPrimitive(lexItem item) tomlType { p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) panic("unreachable") } - -// typeOfArray returns a tomlType for an array given a list of types of its -// values. -// -// In the current spec, if an array is homogeneous, then its type is always -// "Array". If the array is not homogeneous, an error is generated. -func (p *parser) typeOfArray(types []tomlType) tomlType { - // Empty arrays are cool. 
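With typeOfArray gone (its removal finishes just below), arrays no longer have to be homogeneous, matching TOML 1.0; and per the valueArray comment earlier, empty arrays decode to non-nil slices. A usage sketch:

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type cfg struct {
	Mixed []interface{}
	Empty []string
}

func main() {
	var c cfg
	if _, err := toml.Decode("Mixed = [1, \"two\", 3.0]\nEmpty = []", &c); err != nil {
		panic(err)
	}
	fmt.Println(c.Mixed)        // [1 two 3]
	fmt.Println(c.Empty != nil) // true: empty arrays yield non-nil slices
}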
- if len(types) == 0 { - return tomlArray - } - - theType := types[0] - for _, t := range types[1:] { - if !typeEqual(theType, t) { - p.panicf("Array contains values of type '%s' and '%s', but "+ - "arrays must be homogeneous.", theType, t) - } - } - return tomlArray -} diff --git a/vendor/github.com/Microsoft/go-winio/README.md b/vendor/github.com/Microsoft/go-winio/README.md index 60c93fe50..683be1dcf 100644 --- a/vendor/github.com/Microsoft/go-winio/README.md +++ b/vendor/github.com/Microsoft/go-winio/README.md @@ -11,12 +11,27 @@ package. Please see the LICENSE file for licensing information. -This project has adopted the [Microsoft Open Source Code of -Conduct](https://opensource.microsoft.com/codeofconduct/). For more information -see the [Code of Conduct -FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact -[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional -questions or comments. +## Contributing +This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) +declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.microsoft.com. + +When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR +appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA. + +We also require that contributors sign their commits using git commit -s or git commit --signoff to certify they either authored the work themselves +or otherwise have permission to use it in this project. Please see https://developercertificate.org/ for more info, as well as to make sure that you can +attest to the rules listed. Our CI uses the DCO Github app to ensure that all commits in a given PR are signed-off. + + +## Code of Conduct + +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or +contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. + + + +## Special Thanks Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe for another named pipe implementation. diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/tar.go b/vendor/github.com/Microsoft/go-winio/backuptar/tar.go index cb461ca31..2342a7fcd 100644 --- a/vendor/github.com/Microsoft/go-winio/backuptar/tar.go +++ b/vendor/github.com/Microsoft/go-winio/backuptar/tar.go @@ -5,7 +5,6 @@ package backuptar import ( "archive/tar" "encoding/base64" - "errors" "fmt" "io" "io/ioutil" @@ -42,19 +41,14 @@ const ( hdrCreationTime = "LIBARCHIVE.creationtime" ) -func writeZeroes(w io.Writer, count int64) error { - buf := make([]byte, 8192) - c := len(buf) - for i := int64(0); i < count; i += int64(c) { - if int64(c) > count-i { - c = int(count - i) - } - _, err := w.Write(buf[:c]) - if err != nil { - return err - } +// zeroReader is an io.Reader that always returns 0s. 
+type zeroReader struct{} + +func (zr zeroReader) Read(b []byte) (int, error) { + for i := range b { + b[i] = 0 } - return nil + return len(b), nil } func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error { @@ -71,16 +65,26 @@ func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error { return fmt.Errorf("unexpected stream %d", bhdr.Id) } + // We can't seek backwards, since we have already written that data to the tar.Writer. + if bhdr.Offset < curOffset { + return fmt.Errorf("cannot seek back from %d to %d", curOffset, bhdr.Offset) + } // archive/tar does not support writing sparse files // so just write zeroes to catch up to the current offset. - err = writeZeroes(t, bhdr.Offset-curOffset) + if _, err := io.CopyN(t, zeroReader{}, bhdr.Offset-curOffset); err != nil { + return fmt.Errorf("seek to offset %d: %s", bhdr.Offset, err) + } if bhdr.Size == 0 { + // A sparse block with size = 0 is used to mark the end of the sparse blocks. break } n, err := io.Copy(t, br) if err != nil { return err } + if n != bhdr.Size { + return fmt.Errorf("copied %d bytes instead of %d at offset %d", n, bhdr.Size, bhdr.Offset) + } curOffset = bhdr.Offset + n } return nil @@ -109,6 +113,69 @@ func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *ta return hdr } +// SecurityDescriptorFromTarHeader reads the SDDL associated with the header of the current file +// from the tar header and returns the security descriptor into a byte slice. +func SecurityDescriptorFromTarHeader(hdr *tar.Header) ([]byte, error) { + // Maintaining old SDDL-based behavior for backward + // compatibility. All new tar headers written by this library + // will have raw binary for the security descriptor. + var sd []byte + var err error + if sddl, ok := hdr.PAXRecords[hdrSecurityDescriptor]; ok { + sd, err = winio.SddlToSecurityDescriptor(sddl) + if err != nil { + return nil, err + } + } + if sdraw, ok := hdr.PAXRecords[hdrRawSecurityDescriptor]; ok { + sd, err = base64.StdEncoding.DecodeString(sdraw) + if err != nil { + return nil, err + } + } + return sd, nil +} + +// ExtendedAttributesFromTarHeader reads the EAs associated with the header of the +// current file from the tar header and returns it as a byte slice. +func ExtendedAttributesFromTarHeader(hdr *tar.Header) ([]byte, error) { + var eas []winio.ExtendedAttribute + var eadata []byte + var err error + for k, v := range hdr.PAXRecords { + if !strings.HasPrefix(k, hdrEaPrefix) { + continue + } + data, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return nil, err + } + eas = append(eas, winio.ExtendedAttribute{ + Name: k[len(hdrEaPrefix):], + Value: data, + }) + } + if len(eas) != 0 { + eadata, err = winio.EncodeExtendedAttributes(eas) + if err != nil { + return nil, err + } + } + return eadata, nil +} + +// EncodeReparsePointFromTarHeader reads the ReparsePoint structure from the tar header +// and encodes it into a byte slice. The file for which this function is called must be a +// symlink. +func EncodeReparsePointFromTarHeader(hdr *tar.Header) []byte { + _, isMountPoint := hdr.PAXRecords[hdrMountPoint] + rp := winio.ReparsePoint{ + Target: filepath.FromSlash(hdr.Linkname), + IsMountPoint: isMountPoint, + } + return winio.EncodeReparsePoint(&rp) +} + // WriteTarFileFromBackupStream writes a file to a tar writer using data from a Win32 backup stream. // // This encodes Win32 metadata as tar pax vendor extensions starting with MSWINDOWS. 
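The zeroReader above replaces the old writeZeroes loop: io.CopyN can stream any number of zero bytes through a trivial io.Reader, with no scratch-buffer bookkeeping. The same technique in isolation:

package main

import (
	"bytes"
	"fmt"
	"io"
)

// zeros is an io.Reader that fills any buffer it is given with zero bytes.
type zeros struct{}

func (zeros) Read(b []byte) (int, error) {
	for i := range b {
		b[i] = 0
	}
	return len(b), nil
}

// zeroFill writes exactly n zero bytes to w.
func zeroFill(w io.Writer, n int64) error {
	_, err := io.CopyN(w, zeros{}, n)
	return err
}

func main() {
	var buf bytes.Buffer
	if err := zeroFill(&buf, 16); err != nil {
		panic(err)
	}
	fmt.Println(buf.Len(), bytes.Count(buf.Bytes(), []byte{0})) // 16 16
}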
@@ -221,20 +288,44 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
 		}
 	}
 
+	// The logic for copying file contents is fairly complicated due to the need for handling sparse files,
+	// and the weird ways they are represented by BackupRead. A normal file will always either have a data stream
+	// with size and content, or no data stream at all (if empty). However, for a sparse file, the content can also
+	// be represented using a series of sparse block streams following the data stream. Additionally, the way sparse
+	// files are handled by BackupRead has changed in the OS recently. The specifics of the representation are described
+	// in the list at the bottom of this block comment.
+	//
+	// Sparse files can be represented in four different ways, based on the specifics of the file.
+	// - Size = 0:
+	//     Previously: BackupRead yields no data stream and no sparse block streams.
+	//     Recently: BackupRead yields a data stream with size = 0. There are no following sparse block streams.
+	// - Size > 0, no allocated ranges:
+	//     BackupRead yields a data stream with size = 0. Following is a single sparse block stream with
+	//     size = 0 and offset = <file size>.
+	// - Size > 0, one allocated range:
+	//     BackupRead yields a data stream with size = <file size> containing the file contents. There are no
+	//     sparse block streams. This is the case if you take a normal file with contents and simply set the
+	//     sparse flag on it.
+	// - Size > 0, multiple allocated ranges:
+	//     BackupRead yields a data stream with size = 0. Following are sparse block streams for each allocated
+	//     range of the file containing the range contents. Finally there is a sparse block stream with
+	//     size = 0 and offset = <file size>.
+
 	if dataHdr != nil {
 		// A data stream was found. Copy the data.
-		if (dataHdr.Attributes & winio.StreamSparseAttributes) == 0 {
+		// We assume that we will either have a data stream size > 0 XOR have sparse block streams.
+		if dataHdr.Size > 0 || (dataHdr.Attributes&winio.StreamSparseAttributes) == 0 {
 			if size != dataHdr.Size {
 				return fmt.Errorf("%s: mismatch between file size %d and header size %d", name, size, dataHdr.Size)
 			}
-			_, err = io.Copy(t, br)
-			if err != nil {
-				return err
+			if _, err = io.Copy(t, br); err != nil {
+				return fmt.Errorf("%s: copying contents from data stream: %s", name, err)
 			}
-		} else {
-			err = copySparse(t, br)
-			if err != nil {
-				return err
+		} else if size > 0 {
+			// As of a recent OS change, BackupRead now returns a data stream for empty sparse files.
+			// These files have no sparse block streams, so skip the copySparse call if file size = 0.
+			if err = copySparse(t, br); err != nil {
+				return fmt.Errorf("%s: copying contents from sparse block stream: %s", name, err)
 			}
 		}
 	}
@@ -279,7 +370,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
 		} else {
 			// Unsupported for now, since the size of the alternate stream is not present
 			// in the backup stream until after the data has been read.
-			return errors.New("tar of sparse alternate data streams is unsupported")
+			return fmt.Errorf("%s: tar of sparse alternate data streams is unsupported", name)
 		}
 	case winio.BackupEaData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
 		// ignore these streams
@@ -330,21 +421,10 @@ func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *win
 // tar file that was not processed, or io.EOF if there are no more.
func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (*tar.Header, error) { bw := winio.NewBackupStreamWriter(w) - var sd []byte - var err error - // Maintaining old SDDL-based behavior for backward compatibility. All new tar headers written - // by this library will have raw binary for the security descriptor. - if sddl, ok := hdr.PAXRecords[hdrSecurityDescriptor]; ok { - sd, err = winio.SddlToSecurityDescriptor(sddl) - if err != nil { - return nil, err - } - } - if sdraw, ok := hdr.PAXRecords[hdrRawSecurityDescriptor]; ok { - sd, err = base64.StdEncoding.DecodeString(sdraw) - if err != nil { - return nil, err - } + + sd, err := SecurityDescriptorFromTarHeader(hdr) + if err != nil { + return nil, err } if len(sd) != 0 { bhdr := winio.BackupHeader{ @@ -360,25 +440,12 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) ( return nil, err } } - var eas []winio.ExtendedAttribute - for k, v := range hdr.PAXRecords { - if !strings.HasPrefix(k, hdrEaPrefix) { - continue - } - data, err := base64.StdEncoding.DecodeString(v) - if err != nil { - return nil, err - } - eas = append(eas, winio.ExtendedAttribute{ - Name: k[len(hdrEaPrefix):], - Value: data, - }) + + eadata, err := ExtendedAttributesFromTarHeader(hdr) + if err != nil { + return nil, err } - if len(eas) != 0 { - eadata, err := winio.EncodeExtendedAttributes(eas) - if err != nil { - return nil, err - } + if len(eadata) != 0 { bhdr := winio.BackupHeader{ Id: winio.BackupEaData, Size: int64(len(eadata)), @@ -392,13 +459,9 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) ( return nil, err } } + if hdr.Typeflag == tar.TypeSymlink { - _, isMountPoint := hdr.PAXRecords[hdrMountPoint] - rp := winio.ReparsePoint{ - Target: filepath.FromSlash(hdr.Linkname), - IsMountPoint: isMountPoint, - } - reparse := winio.EncodeReparsePoint(&rp) + reparse := EncodeReparsePointFromTarHeader(hdr) bhdr := winio.BackupHeader{ Id: winio.BackupReparseData, Size: int64(len(reparse)), @@ -411,7 +474,9 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) ( if err != nil { return nil, err } + } + if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA { bhdr := winio.BackupHeader{ Id: winio.BackupData, diff --git a/vendor/github.com/Microsoft/go-winio/file.go b/vendor/github.com/Microsoft/go-winio/file.go index 0385e4108..293ab54c8 100644 --- a/vendor/github.com/Microsoft/go-winio/file.go +++ b/vendor/github.com/Microsoft/go-winio/file.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package winio @@ -143,6 +144,11 @@ func (f *win32File) Close() error { return nil } +// IsClosed checks if the file has been closed +func (f *win32File) IsClosed() bool { + return f.closing.isSet() +} + // prepareIo prepares for a new IO operation. // The caller must call f.wg.Done() when the IO is finished, prior to Close() returning. 
func (f *win32File) prepareIo() (*ioOperation, error) { diff --git a/vendor/github.com/Microsoft/go-winio/go.mod b/vendor/github.com/Microsoft/go-winio/go.mod index 98a8dea0e..f39a608da 100644 --- a/vendor/github.com/Microsoft/go-winio/go.mod +++ b/vendor/github.com/Microsoft/go-winio/go.mod @@ -1,9 +1,8 @@ module github.com/Microsoft/go-winio -go 1.12 +go 1.13 require ( - github.com/pkg/errors v0.9.1 github.com/sirupsen/logrus v1.7.0 golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c ) diff --git a/vendor/github.com/Microsoft/go-winio/go.sum b/vendor/github.com/Microsoft/go-winio/go.sum index aa6ad3b57..9bdcd9cfd 100644 --- a/vendor/github.com/Microsoft/go-winio/go.sum +++ b/vendor/github.com/Microsoft/go-winio/go.sum @@ -1,14 +1,11 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/Microsoft/go-winio/hvsock.go b/vendor/github.com/Microsoft/go-winio/hvsock.go index b632f8f8b..b2b644d00 100644 --- a/vendor/github.com/Microsoft/go-winio/hvsock.go +++ b/vendor/github.com/Microsoft/go-winio/hvsock.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package winio @@ -252,15 +253,23 @@ func (conn *HvsockConn) Close() error { return conn.sock.Close() } +func (conn *HvsockConn) IsClosed() bool { + return conn.sock.IsClosed() +} + func (conn *HvsockConn) shutdown(how int) error { - err := syscall.Shutdown(conn.sock.handle, syscall.SHUT_RD) + if conn.IsClosed() { + return ErrFileClosed + } + + err := syscall.Shutdown(conn.sock.handle, how) if err != nil { return os.NewSyscallError("shutdown", err) } return nil } -// CloseRead shuts down the read end of the socket. +// CloseRead shuts down the read end of the socket, preventing future read operations. func (conn *HvsockConn) CloseRead() error { err := conn.shutdown(syscall.SHUT_RD) if err != nil { @@ -269,8 +278,8 @@ func (conn *HvsockConn) CloseRead() error { return nil } -// CloseWrite shuts down the write end of the socket, notifying the other endpoint that -// no more data will be written. +// CloseWrite shuts down the write end of the socket, preventing future write operations and +// notifying the other endpoint that no more data will be written. 
func (conn *HvsockConn) CloseWrite() error { err := conn.shutdown(syscall.SHUT_WR) if err != nil { diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go index f497c0e39..2d9161e2d 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go @@ -14,8 +14,6 @@ import ( "encoding/binary" "fmt" "strconv" - - "golang.org/x/sys/windows" ) // Variant specifies which GUID variant (or "type") of the GUID. It determines @@ -41,13 +39,6 @@ type Version uint8 var _ = (encoding.TextMarshaler)(GUID{}) var _ = (encoding.TextUnmarshaler)(&GUID{}) -// GUID represents a GUID/UUID. It has the same structure as -// golang.org/x/sys/windows.GUID so that it can be used with functions expecting -// that type. It is defined as its own type so that stringification and -// marshaling can be supported. The representation matches that used by native -// Windows code. -type GUID windows.GUID - // NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122. func NewV4() (GUID, error) { var b [16]byte diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go new file mode 100644 index 000000000..f64d828c0 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go @@ -0,0 +1,15 @@ +// +build !windows + +package guid + +// GUID represents a GUID/UUID. It has the same structure as +// golang.org/x/sys/windows.GUID so that it can be used with functions expecting +// that type. It is defined as its own type as that is only available to builds +// targeted at `windows`. The representation matches that used by native Windows +// code. +type GUID struct { + Data1 uint32 + Data2 uint16 + Data3 uint16 + Data4 [8]byte +} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go new file mode 100644 index 000000000..83617f4ee --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go @@ -0,0 +1,10 @@ +package guid + +import "golang.org/x/sys/windows" + +// GUID represents a GUID/UUID. It has the same structure as +// golang.org/x/sys/windows.GUID so that it can be used with functions expecting +// that type. It is defined as its own type so that stringification and +// marshaling can be supported. The representation matches that used by native +// Windows code. +type GUID windows.GUID diff --git a/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go b/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go index fca241590..602920786 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go @@ -3,11 +3,10 @@ package security import ( + "fmt" "os" "syscall" "unsafe" - - "github.com/pkg/errors" ) type ( @@ -72,7 +71,7 @@ func GrantVmGroupAccess(name string) error { // Stat (to determine if `name` is a directory). s, err := os.Stat(name) if err != nil { - return errors.Wrapf(err, "%s os.Stat %s", gvmga, name) + return fmt.Errorf("%s os.Stat %s: %w", gvmga, name, err) } // Get a handle to the file/directory. Must defer Close on success. 
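For context on the `errors.Wrapf` to `fmt.Errorf("...: %w", err)` conversion shown above, a small self-contained sketch (hypothetical wrapper, standard library only) of what the `%w` verb preserves: the original error stays in the chain, so `errors.Is` and `errors.As` still see it without the github.com/pkg/errors dependency.

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

// statWrapped mirrors the GrantVmGroupAccess error style: prefix plus %w.
func statWrapped(name string) error {
	if _, err := os.Stat(name); err != nil {
		// %w wraps err the way errors.Wrapf did, keeping it inspectable.
		return fmt.Errorf("gvmga os.Stat %s: %w", name, err)
	}
	return nil
}

func main() {
	err := statWrapped("/definitely/missing")
	// true: the wrapped os error is still reachable through the chain.
	fmt.Println(errors.Is(err, os.ErrNotExist))
}
```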
@@ -88,7 +87,7 @@ func GrantVmGroupAccess(name string) error { sd := uintptr(0) origDACL := uintptr(0) if err := getSecurityInfo(fd, uint32(ot), uint32(si), nil, nil, &origDACL, nil, &sd); err != nil { - return errors.Wrapf(err, "%s GetSecurityInfo %s", gvmga, name) + return fmt.Errorf("%s GetSecurityInfo %s: %w", gvmga, name, err) } defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(sd))) @@ -102,7 +101,7 @@ func GrantVmGroupAccess(name string) error { // And finally use SetSecurityInfo to apply the updated DACL. if err := setSecurityInfo(fd, uint32(ot), uint32(si), uintptr(0), uintptr(0), newDACL, uintptr(0)); err != nil { - return errors.Wrapf(err, "%s SetSecurityInfo %s", gvmga, name) + return fmt.Errorf("%s SetSecurityInfo %s: %w", gvmga, name, err) } return nil @@ -120,7 +119,7 @@ func createFile(name string, isDir bool) (syscall.Handle, error) { } fd, err := syscall.CreateFile(&namep[0], da, sm, nil, syscall.OPEN_EXISTING, fa, 0) if err != nil { - return 0, errors.Wrapf(err, "%s syscall.CreateFile %s", gvmga, name) + return 0, fmt.Errorf("%s syscall.CreateFile %s: %w", gvmga, name, err) } return fd, nil } @@ -131,7 +130,7 @@ func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintp // Generate pointers to the SIDs based on the string SIDs sid, err := syscall.StringToSid(sidVmGroup) if err != nil { - return 0, errors.Wrapf(err, "%s syscall.StringToSid %s %s", gvmga, name, sidVmGroup) + return 0, fmt.Errorf("%s syscall.StringToSid %s %s: %w", gvmga, name, sidVmGroup, err) } inheritance := inheritModeNoInheritance @@ -154,7 +153,7 @@ func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintp modifiedDACL := uintptr(0) if err := setEntriesInAcl(uintptr(uint32(1)), uintptr(unsafe.Pointer(&eaArray[0])), origDACL, &modifiedDACL); err != nil { - return 0, errors.Wrapf(err, "%s SetEntriesInAcl %s", gvmga, name) + return 0, fmt.Errorf("%s SetEntriesInAcl %s: %w", gvmga, name, err) } return modifiedDACL, nil diff --git a/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go index c40c2739b..d7096716c 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go @@ -2,6 +2,6 @@ package security //go:generate go run mksyscall_windows.go -output zsyscall_windows.go syscall_windows.go -//sys getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (err error) [failretval!=0] = advapi32.GetSecurityInfo -//sys setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (err error) [failretval!=0] = advapi32.SetSecurityInfo -//sys setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (err error) [failretval!=0] = advapi32.SetEntriesInAclW +//sys getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (win32err error) = advapi32.GetSecurityInfo +//sys setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (win32err error) = advapi32.SetSecurityInfo +//sys setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (win32err 
error) = advapi32.SetEntriesInAclW diff --git a/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go index 4a90cb3cc..4084680e0 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go @@ -45,26 +45,26 @@ var ( procSetSecurityInfo = modadvapi32.NewProc("SetSecurityInfo") ) -func getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (err error) { - r1, _, e1 := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(unsafe.Pointer(ppsidOwner)), uintptr(unsafe.Pointer(ppsidGroup)), uintptr(unsafe.Pointer(ppDacl)), uintptr(unsafe.Pointer(ppSacl)), uintptr(unsafe.Pointer(ppSecurityDescriptor)), 0) - if r1 != 0 { - err = errnoErr(e1) +func getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (win32err error) { + r0, _, _ := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(unsafe.Pointer(ppsidOwner)), uintptr(unsafe.Pointer(ppsidGroup)), uintptr(unsafe.Pointer(ppDacl)), uintptr(unsafe.Pointer(ppSacl)), uintptr(unsafe.Pointer(ppSecurityDescriptor)), 0) + if r0 != 0 { + win32err = syscall.Errno(r0) } return } -func setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(count), uintptr(pListOfEEs), uintptr(oldAcl), uintptr(unsafe.Pointer(newAcl)), 0, 0) - if r1 != 0 { - err = errnoErr(e1) +func setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (win32err error) { + r0, _, _ := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(count), uintptr(pListOfEEs), uintptr(oldAcl), uintptr(unsafe.Pointer(newAcl)), 0, 0) + if r0 != 0 { + win32err = syscall.Errno(r0) } return } -func setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (err error) { - r1, _, e1 := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(psidOwner), uintptr(psidGroup), uintptr(pDacl), uintptr(pSacl), 0, 0) - if r1 != 0 { - err = errnoErr(e1) +func setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (win32err error) { + r0, _, _ := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(psidOwner), uintptr(psidGroup), uintptr(pDacl), uintptr(pSacl), 0, 0) + if r0 != 0 { + win32err = syscall.Errno(r0) } return } diff --git a/vendor/github.com/Microsoft/go-winio/vhd/vhd.go b/vendor/github.com/Microsoft/go-winio/vhd/vhd.go index b03b789e6..f7f78fc23 100644 --- a/vendor/github.com/Microsoft/go-winio/vhd/vhd.go +++ b/vendor/github.com/Microsoft/go-winio/vhd/vhd.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package vhd @@ -7,17 +8,16 @@ import ( "syscall" "github.com/Microsoft/go-winio/pkg/guid" - "github.com/pkg/errors" "golang.org/x/sys/windows" ) //go:generate go run mksyscall_windows.go -output zvhd_windows.go vhd.go -//sys createVirtualDisk(virtualStorageType 
*VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (err error) [failretval != 0] = virtdisk.CreateVirtualDisk -//sys openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (err error) [failretval != 0] = virtdisk.OpenVirtualDisk -//sys attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (err error) [failretval != 0] = virtdisk.AttachVirtualDisk -//sys detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (err error) [failretval != 0] = virtdisk.DetachVirtualDisk -//sys getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (err error) [failretval != 0] = virtdisk.GetVirtualDiskPhysicalPath +//sys createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) = virtdisk.CreateVirtualDisk +//sys openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) = virtdisk.OpenVirtualDisk +//sys attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (win32err error) = virtdisk.AttachVirtualDisk +//sys detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (win32err error) = virtdisk.DetachVirtualDisk +//sys getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (win32err error) = virtdisk.GetVirtualDiskPhysicalPath type ( CreateVirtualDiskFlag uint32 @@ -62,13 +62,27 @@ type OpenVirtualDiskParameters struct { Version2 OpenVersion2 } +// The higher level `OpenVersion2` struct uses bools to refer to `GetInfoOnly` and `ReadOnly` for ease of use. However, +// the internal windows structure uses `BOOLS` aka int32s for these types. `openVersion2` is used for translating +// `OpenVersion2` fields to the correct windows internal field types on the `Open____` methods. +type openVersion2 struct { + getInfoOnly int32 + readOnly int32 + resiliencyGUID guid.GUID +} + +type openVirtualDiskParameters struct { + version uint32 + version2 openVersion2 +} + type AttachVersion2 struct { RestrictedOffset uint64 RestrictedLength uint64 } type AttachVirtualDiskParameters struct { - Version uint32 // Must always be set to 2 + Version uint32 Version2 AttachVersion2 } @@ -146,16 +160,13 @@ func CreateVhdx(path string, maxSizeInGb, blockSizeInMb uint32) error { return err } - if err := syscall.CloseHandle(handle); err != nil { - return err - } - return nil + return syscall.CloseHandle(handle) } // DetachVirtualDisk detaches a virtual hard disk by handle. 
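A standalone sketch of the bool-to-BOOL translation described in the `openVersion2` comment above (`boolToWin32` is a hypothetical helper, not part of the patch): Windows structures store booleans as int32, so the exported bool fields are converted before being handed to the syscall.

```go
package main

import "fmt"

// boolToWin32 converts a Go bool to the int32 representation Windows
// uses for BOOL fields in native structures.
func boolToWin32(b bool) int32 {
	if b {
		return 1
	}
	return 0
}

func main() {
	// Mirrors the shape of the internal openVersion2 struct.
	type openVersion2 struct {
		getInfoOnly int32
		readOnly    int32
	}
	params := openVersion2{
		getInfoOnly: boolToWin32(true),
		readOnly:    boolToWin32(false),
	}
	fmt.Printf("%+v\n", params) // {getInfoOnly:1 readOnly:0}
}
```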
func DetachVirtualDisk(handle syscall.Handle) (err error) { if err := detachVirtualDisk(handle, 0, 0); err != nil { - return errors.Wrap(err, "failed to detach virtual disk") + return fmt.Errorf("failed to detach virtual disk: %w", err) } return nil } @@ -185,7 +196,7 @@ func AttachVirtualDisk(handle syscall.Handle, attachVirtualDiskFlag AttachVirtua parameters, nil, ); err != nil { - return errors.Wrap(err, "failed to attach virtual disk") + return fmt.Errorf("failed to attach virtual disk: %w", err) } return nil } @@ -209,7 +220,7 @@ func AttachVhd(path string) (err error) { AttachVirtualDiskFlagNone, ¶ms, ); err != nil { - return errors.Wrap(err, "failed to attach virtual disk") + return fmt.Errorf("failed to attach virtual disk: %w", err) } return nil } @@ -234,19 +245,35 @@ func OpenVirtualDiskWithParameters(vhdPath string, virtualDiskAccessMask Virtual var ( handle syscall.Handle defaultType VirtualStorageType + getInfoOnly int32 + readOnly int32 ) if parameters.Version != 2 { return handle, fmt.Errorf("only version 2 VHDs are supported, found version: %d", parameters.Version) } + if parameters.Version2.GetInfoOnly { + getInfoOnly = 1 + } + if parameters.Version2.ReadOnly { + readOnly = 1 + } + params := &openVirtualDiskParameters{ + version: parameters.Version, + version2: openVersion2{ + getInfoOnly, + readOnly, + parameters.Version2.ResiliencyGUID, + }, + } if err := openVirtualDisk( &defaultType, vhdPath, uint32(virtualDiskAccessMask), uint32(openVirtualDiskFlags), - parameters, + params, &handle, ); err != nil { - return 0, errors.Wrap(err, "failed to open virtual disk") + return 0, fmt.Errorf("failed to open virtual disk: %w", err) } return handle, nil } @@ -272,7 +299,7 @@ func CreateVirtualDisk(path string, virtualDiskAccessMask VirtualDiskAccessMask, nil, &handle, ); err != nil { - return handle, errors.Wrap(err, "failed to create virtual disk") + return handle, fmt.Errorf("failed to create virtual disk: %w", err) } return handle, nil } @@ -290,7 +317,7 @@ func GetVirtualDiskPhysicalPath(handle syscall.Handle) (_ string, err error) { &diskPathSizeInBytes, &diskPhysicalPathBuf[0], ); err != nil { - return "", errors.Wrap(err, "failed to get disk physical path") + return "", fmt.Errorf("failed to get disk physical path: %w", err) } return windows.UTF16ToString(diskPhysicalPathBuf[:]), nil } @@ -314,10 +341,10 @@ func CreateDiffVhd(diffVhdPath, baseVhdPath string, blockSizeInMB uint32) error createParams, ) if err != nil { - return fmt.Errorf("failed to create differencing vhd: %s", err) + return fmt.Errorf("failed to create differencing vhd: %w", err) } if err := syscall.CloseHandle(vhdHandle); err != nil { - return fmt.Errorf("failed to close differencing vhd handle: %s", err) + return fmt.Errorf("failed to close differencing vhd handle: %w", err) } return nil } diff --git a/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go b/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go index 572f7b42f..1d7498db3 100644 --- a/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go +++ b/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go @@ -47,60 +47,60 @@ var ( procOpenVirtualDisk = modvirtdisk.NewProc("OpenVirtualDisk") ) -func attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procAttachVirtualDisk.Addr(), 6, uintptr(handle), uintptr(unsafe.Pointer(securityDescriptor)), 
uintptr(attachVirtualDiskFlag), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped))) - if r1 != 0 { - err = errnoErr(e1) +func attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (win32err error) { + r0, _, _ := syscall.Syscall6(procAttachVirtualDisk.Addr(), 6, uintptr(handle), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(attachVirtualDiskFlag), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped))) + if r0 != 0 { + win32err = syscall.Errno(r0) } return } -func createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (err error) { +func createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) { var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(path) - if err != nil { + _p0, win32err = syscall.UTF16PtrFromString(path) + if win32err != nil { return } return _createVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, securityDescriptor, createVirtualDiskFlags, providerSpecificFlags, parameters, overlapped, handle) } -func _createVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (err error) { - r1, _, e1 := syscall.Syscall9(procCreateVirtualDisk.Addr(), 9, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(createVirtualDiskFlags), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(handle))) - if r1 != 0 { - err = errnoErr(e1) +func _createVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) { + r0, _, _ := syscall.Syscall9(procCreateVirtualDisk.Addr(), 9, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(createVirtualDiskFlags), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(handle))) + if r0 != 0 { + win32err = syscall.Errno(r0) } return } -func detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procDetachVirtualDisk.Addr(), 3, uintptr(handle), uintptr(detachVirtualDiskFlags), uintptr(providerSpecificFlags)) - if r1 != 0 { - err = errnoErr(e1) +func detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags 
uint32, providerSpecificFlags uint32) (win32err error) { + r0, _, _ := syscall.Syscall(procDetachVirtualDisk.Addr(), 3, uintptr(handle), uintptr(detachVirtualDiskFlags), uintptr(providerSpecificFlags)) + if r0 != 0 { + win32err = syscall.Errno(r0) } return } -func getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procGetVirtualDiskPhysicalPath.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(diskPathSizeInBytes)), uintptr(unsafe.Pointer(buffer))) - if r1 != 0 { - err = errnoErr(e1) +func getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (win32err error) { + r0, _, _ := syscall.Syscall(procGetVirtualDiskPhysicalPath.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(diskPathSizeInBytes)), uintptr(unsafe.Pointer(buffer))) + if r0 != 0 { + win32err = syscall.Errno(r0) } return } -func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (err error) { +func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) { var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(path) - if err != nil { + _p0, win32err = syscall.UTF16PtrFromString(path) + if win32err != nil { return } return _openVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, openVirtualDiskFlags, parameters, handle) } -func _openVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (err error) { - r1, _, e1 := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(openVirtualDiskFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle))) - if r1 != 0 { - err = errnoErr(e1) +func _openVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) { + r0, _, _ := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(openVirtualDiskFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle))) + if r0 != 0 { + win32err = syscall.Errno(r0) } return } diff --git a/vendor/github.com/Microsoft/hcsshim/.gitignore b/vendor/github.com/Microsoft/hcsshim/.gitignore index aec9bd4bb..54ed6f06c 100644 --- a/vendor/github.com/Microsoft/hcsshim/.gitignore +++ b/vendor/github.com/Microsoft/hcsshim/.gitignore @@ -1,3 +1,38 @@ +# Binaries for programs and plugins *.exe -.idea -.vscode +*.dll +*.so +*.dylib + +# Ignore vscode setting files +.vscode/ + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ + +# Ignore gcs bin directory +service/bin/ +service/pkg/ + +*.img +*.vhd +*.tar.gz + +# Make stuff +.rootfs-done +bin/* +rootfs/* +*.o +/build/ + +deps/* +out/* + +.idea/ +.vscode/ \ No newline at end of file diff --git 
a/vendor/github.com/Microsoft/hcsshim/.golangci.yml b/vendor/github.com/Microsoft/hcsshim/.golangci.yml new file mode 100644 index 000000000..2400e7f1e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/.golangci.yml @@ -0,0 +1,99 @@ +run: + timeout: 8m + +linters: + enable: + - stylecheck + +linters-settings: + stylecheck: + # https://staticcheck.io/docs/checks + checks: ["all"] + + +issues: + # This repo has a LOT of generated schema files, operating system bindings, and other things that ST1003 from stylecheck won't like + # (screaming case Windows api constants for example). There's also some structs that we *could* change the initialisms to be Go + # friendly (Id -> ID) but they're exported and it would be a breaking change. This makes it so that most new code, code that isn't + # supposed to be a pretty faithful mapping to an OS call/constants, or non-generated code still checks if we're following idioms, + # while ignoring the things that are just noise or would be more of a hassle than it'd be worth to change. + exclude-rules: + - path: layer.go + linters: + - stylecheck + Text: "ST1003:" + + - path: hcsshim.go + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\hcs\\schema2\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\wclayer\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: hcn\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\hcs\\schema1\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\hns\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: ext4\\internal\\compactext4\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: ext4\\internal\\format\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\guestrequest\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\guest\\prot\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\windevice\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\winapi\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\vmcompute\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\regstate\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\hcserror\\ + linters: + - stylecheck + Text: "ST1003:" \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/Makefile b/vendor/github.com/Microsoft/hcsshim/Makefile new file mode 100644 index 000000000..a8f5516cd --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/Makefile @@ -0,0 +1,87 @@ +BASE:=base.tar.gz + +GO:=go +GO_FLAGS:=-ldflags "-s -w" # strip Go binaries +CGO_ENABLED:=0 +GOMODVENDOR:= + +CFLAGS:=-O2 -Wall +LDFLAGS:=-static -s # strip C binaries + +GO_FLAGS_EXTRA:= +ifeq "$(GOMODVENDOR)" "1" +GO_FLAGS_EXTRA += -mod=vendor +endif +GO_BUILD:=CGO_ENABLED=$(CGO_ENABLED) $(GO) build $(GO_FLAGS) $(GO_FLAGS_EXTRA) + +SRCROOT=$(dir $(abspath $(firstword $(MAKEFILE_LIST)))) + +# The link aliases for gcstools +GCS_TOOLS=\ + generichook + +.PHONY: all always rootfs test + +all: out/initrd.img out/rootfs.tar.gz + +clean: + find -name '*.o' -print0 | xargs -0 -r rm + rm -rf bin deps rootfs out + +test: + cd $(SRCROOT) && go test -v ./internal/guest/... 
+ +out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools Makefile + @mkdir -p out + rm -rf rootfs + mkdir -p rootfs/bin/ + cp bin/init rootfs/ + cp bin/vsockexec rootfs/bin/ + cp bin/cmd/gcs rootfs/bin/ + cp bin/cmd/gcstools rootfs/bin/ + for tool in $(GCS_TOOLS); do ln -s gcstools rootfs/bin/$$tool; done + git -C $(SRCROOT) rev-parse HEAD > rootfs/gcs.commit && \ + git -C $(SRCROOT) rev-parse --abbrev-ref HEAD > rootfs/gcs.branch + tar -zcf $@ -C rootfs . + rm -rf rootfs + +out/rootfs.tar.gz: out/initrd.img + rm -rf rootfs-conv + mkdir rootfs-conv + gunzip -c out/initrd.img | (cd rootfs-conv && cpio -imd) + tar -zcf $@ -C rootfs-conv . + rm -rf rootfs-conv + +out/initrd.img: $(BASE) out/delta.tar.gz $(SRCROOT)/hack/catcpio.sh + $(SRCROOT)/hack/catcpio.sh "$(BASE)" out/delta.tar.gz > out/initrd.img.uncompressed + gzip -c out/initrd.img.uncompressed > $@ + rm out/initrd.img.uncompressed + +-include deps/cmd/gcs.gomake +-include deps/cmd/gcstools.gomake + +# Implicit rule for includes that define Go targets. +%.gomake: $(SRCROOT)/Makefile + @mkdir -p $(dir $@) + @/bin/echo $(@:deps/%.gomake=bin/%): $(SRCROOT)/hack/gomakedeps.sh > $@.new + @/bin/echo -e '\t@mkdir -p $$(dir $$@) $(dir $@)' >> $@.new + @/bin/echo -e '\t$$(GO_BUILD) -o $$@.new $$(SRCROOT)/$$(@:bin/%=%)' >> $@.new + @/bin/echo -e '\tGO="$(GO)" $$(SRCROOT)/hack/gomakedeps.sh $$@ $$(SRCROOT)/$$(@:bin/%=%) $$(GO_FLAGS) $$(GO_FLAGS_EXTRA) > $(@:%.gomake=%.godeps).new' >> $@.new + @/bin/echo -e '\tmv $(@:%.gomake=%.godeps).new $(@:%.gomake=%.godeps)' >> $@.new + @/bin/echo -e '\tmv $$@.new $$@' >> $@.new + @/bin/echo -e '-include $(@:%.gomake=%.godeps)' >> $@.new + mv $@.new $@ + +VPATH=$(SRCROOT) + +bin/vsockexec: vsockexec/vsockexec.o vsockexec/vsock.o + @mkdir -p bin + $(CC) $(LDFLAGS) -o $@ $^ + +bin/init: init/init.o vsockexec/vsock.o + @mkdir -p bin + $(CC) $(LDFLAGS) -o $@ $^ + +%.o: %.c + @mkdir -p $(dir $@) + $(CC) $(CFLAGS) $(CPPFLAGS) -c -o $@ $< diff --git a/vendor/github.com/Microsoft/hcsshim/README.md b/vendor/github.com/Microsoft/hcsshim/README.md index 95c300365..b8ca926a9 100644 --- a/vendor/github.com/Microsoft/hcsshim/README.md +++ b/vendor/github.com/Microsoft/hcsshim/README.md @@ -2,13 +2,67 @@ [![Build status](https://github.com/microsoft/hcsshim/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/microsoft/hcsshim/actions?query=branch%3Amaster) -This package contains the Golang interface for using the Windows [Host Compute Service](https://techcommunity.microsoft.com/t5/containers/introducing-the-host-compute-service-hcs/ba-p/382332) (HCS) to launch and manage [Windows Containers](https://docs.microsoft.com/en-us/virtualization/windowscontainers/about/). It also contains other helpers and functions for managing Windows Containers such as the Golang interface for the Host Network Service (HNS). +This package contains the Golang interface for using the Windows [Host Compute Service](https://techcommunity.microsoft.com/t5/containers/introducing-the-host-compute-service-hcs/ba-p/382332) (HCS) to launch and manage [Windows Containers](https://docs.microsoft.com/en-us/virtualization/windowscontainers/about/). It also contains other helpers and functions for managing Windows Containers such as the Golang interface for the Host Network Service (HNS), as well as code for the [guest agent](./internal/guest/README.md) (commonly referred to as the GCS or Guest Compute Service in the codebase) used to support running Linux Hyper-V containers. 
-It is primarily used in the [Moby Project](https://github.com/moby/moby), but it can be freely used by other projects as well.
+It is primarily used in the [Moby](https://github.com/moby/moby) and [Containerd](https://github.com/containerd/containerd) projects, but it can be freely used by other projects as well.
+
+## Building
+
+While this repository can be used as a library of sorts to call the HCS apis, there are a couple binaries built out of the repository as well. The main ones are the Linux guest agent and an implementation of the [runtime v2 containerd shim api](https://github.com/containerd/containerd/blob/master/runtime/v2/README.md).
+### Linux Hyper-V Container Guest Agent
+
+To build the Linux guest agent itself, all that's needed is to set your GOOS to "linux" and build out of ./cmd/gcs.
+```powershell
+C:\> $env:GOOS="linux"
+C:\> go build .\cmd\gcs\
+```
+
+or on a Linux machine:
+```sh
+> go build ./cmd/gcs
+```
+
+To package it inside a rootfs that boots alongside all of the other tools, you'll need to provide a rootfs for it to be packaged into. An easy way is to export the rootfs of a container.
+
+```sh
+docker pull busybox
+docker run --name base_image_container busybox
+docker export base_image_container | gzip > base.tar.gz
+BASE=./base.tar.gz
+make all
+```
+
+If the build is successful, in the `./out` folder you should see:
+```sh
+> ls ./out/
+delta.tar.gz initrd.img rootfs.tar.gz
+```
+
+### Containerd Shim
+For info on the Runtime V2 API: https://github.com/containerd/containerd/blob/master/runtime/v2/README.md.
+
+Contrary to the typical Linux architecture of shim -> runc, the runhcs shim is used both to launch and manage the lifetime of containers.
+
+```powershell
+C:\> $env:GOOS="windows"
+C:\> go build .\cmd\containerd-shim-runhcs-v1
+```
+
+Then place the binary in the same directory as the Containerd binary in your environment. A default Containerd configuration file can be generated by running:
+```powershell
+.\containerd.exe config default | Out-File "C:\Program Files\containerd\config.toml" -Encoding ascii
+```
+
+This config file will already have the shim set as the default runtime for CRI interactions.
+
+To try the shim out with ctr.exe:
+```powershell
+C:\> ctr.exe run --runtime io.containerd.runhcs.v1 --rm mcr.microsoft.com/windows/nanoserver:2004 windows-test cmd /c "echo Hello World!"
+```
 
 ## Contributing
 
-This project welcomes contributions and suggestions. Most contributions require you to agree to a
+This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA)
 declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit
 https://cla.microsoft.com.
 
@@ -16,7 +70,27 @@ When you submit a pull request, a CLA-bot will automatically determine whether y
 a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the
 instructions provided by the bot. You will only need to do this once across all repos using our CLA.
 
-We also ask that contributors [sign their commits](https://git-scm.com/docs/git-commit) using `git commit -s` or `git commit --signoff` to certify they either authored the work themselves or otherwise have permission to use it in this project.
+We also require that contributors [sign their commits](https://git-scm.com/docs/git-commit) using `git commit -s` or `git commit --signoff` to +certify they either authored the work themselves or otherwise have permission to use it in this project. Please see https://developercertificate.org/ for +more info, as well as to make sure that you can attest to the rules listed. Our CI uses the [DCO Github app](https://github.com/apps/dco) to ensure +that all commits in a given PR are signed-off. + +### Test Directory (Important to note) + +This project has tried to trim some dependencies from the root Go modules file that would be cumbersome to get transitively included if this +project is being vendored/used as a library. Some of these dependencies were only being used for tests, so the /test directory in this project also has +its own go.mod file where these are now included to get around this issue. Our tests rely on the code in this project to run, so the test Go modules file +has a relative path replace directive to pull in the latest hcsshim code that the tests actually touch from this project +(which is the repo itself on your disk). + +``` +replace ( + github.com/Microsoft/hcsshim => ../ +) +``` + +Because of this, for most code changes you may need to run `go mod vendor` + `go mod tidy` in the /test directory in this repository, as the +CI in this project will check if the files are out of date and will fail if this is true. ## Code of Conduct diff --git a/vendor/github.com/Microsoft/hcsshim/errors.go b/vendor/github.com/Microsoft/hcsshim/errors.go index 794308673..f367022e7 100644 --- a/vendor/github.com/Microsoft/hcsshim/errors.go +++ b/vendor/github.com/Microsoft/hcsshim/errors.go @@ -59,7 +59,7 @@ var ( // ErrVmcomputeOperationInvalidState is an error encountered when the compute system is not in a valid state for the requested operation ErrVmcomputeOperationInvalidState = hcs.ErrVmcomputeOperationInvalidState - // ErrProcNotFound is an error encountered when the the process cannot be found + // ErrProcNotFound is an error encountered when a procedure look up fails. ErrProcNotFound = hcs.ErrProcNotFound // ErrVmcomputeOperationAccessIsDenied is an error which can be encountered when enumerating compute systems in RS1/RS2 @@ -159,7 +159,7 @@ func (e *ProcessError) Error() string { // IsNotExist checks if an error is caused by the Container or Process not existing. // Note: Currently, ErrElementNotFound can mean that a Process has either // already exited, or does not exist. Both IsAlreadyStopped and IsNotExist -// will currently return true when the error is ErrElementNotFound or ErrProcNotFound. +// will currently return true when the error is ErrElementNotFound. func IsNotExist(err error) bool { if _, ok := err.(EndpointNotFoundError); ok { return true @@ -192,7 +192,7 @@ func IsTimeout(err error) bool { // a Container or Process being already stopped. // Note: Currently, ErrElementNotFound can mean that a Process has either // already exited, or does not exist. Both IsAlreadyStopped and IsNotExist -// will currently return true when the error is ErrElementNotFound or ErrProcNotFound. +// will currently return true when the error is ErrElementNotFound. 
func IsAlreadyStopped(err error) bool { return hcs.IsAlreadyStopped(getInnerError(err)) } diff --git a/vendor/github.com/Microsoft/hcsshim/go.mod b/vendor/github.com/Microsoft/hcsshim/go.mod index 21640a6b9..9c60dd302 100644 --- a/vendor/github.com/Microsoft/hcsshim/go.mod +++ b/vendor/github.com/Microsoft/hcsshim/go.mod @@ -3,22 +3,34 @@ module github.com/Microsoft/hcsshim go 1.13 require ( + github.com/BurntSushi/toml v0.3.1 github.com/Microsoft/go-winio v0.4.17 + github.com/cenkalti/backoff/v4 v4.1.1 github.com/containerd/cgroups v1.0.1 github.com/containerd/console v1.0.2 - github.com/containerd/containerd v1.5.1 + github.com/containerd/containerd v1.5.7 github.com/containerd/go-runc v1.0.0 - github.com/containerd/ttrpc v1.0.2 + github.com/containerd/ttrpc v1.1.0 github.com/containerd/typeurl v1.0.2 github.com/gogo/protobuf v1.3.2 - github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d + github.com/golang/mock v1.6.0 + github.com/google/go-cmp v0.5.6 + github.com/google/go-containerregistry v0.5.1 + github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3 + github.com/mattn/go-shellwords v1.0.6 + github.com/opencontainers/runc v1.0.2 + github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 github.com/pkg/errors v0.9.1 - github.com/sirupsen/logrus v1.7.0 + github.com/sirupsen/logrus v1.8.1 github.com/urfave/cli v1.22.2 + github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852 + github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae + go.etcd.io/bbolt v1.3.6 go.opencensus.io v0.22.3 - golang.org/x/sync v0.0.0-20201207232520-09787c993a3a - golang.org/x/sys v0.0.0-20210324051608-47abb6519492 - google.golang.org/grpc v1.33.2 + golang.org/x/net v0.0.0-20210825183410-e898025ed96a // indirect + golang.org/x/sync v0.0.0-20210220032951-036812b2e83c + golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e + google.golang.org/grpc v1.40.0 ) replace ( diff --git a/vendor/github.com/Microsoft/hcsshim/go.sum b/vendor/github.com/Microsoft/hcsshim/go.sum index b7bede13f..93c37657f 100644 --- a/vendor/github.com/Microsoft/hcsshim/go.sum +++ b/vendor/github.com/Microsoft/hcsshim/go.sum @@ -9,6 +9,7 @@ cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6T cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -33,6 +34,7 @@ github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935 github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod 
h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= @@ -51,11 +53,14 @@ github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg3 github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= +github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -72,6 +77,7 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= @@ -80,10 +86,13 @@ github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7 github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= +github.com/cenkalti/backoff/v4 v4.1.1 h1:G2HAfAmvm/GcKan2oOQpBXOd2tT2G57ZnZGWa1PxPBQ= +github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= 
+github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= @@ -91,6 +100,7 @@ github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmE github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= @@ -125,8 +135,9 @@ github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7 github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= -github.com/containerd/containerd v1.5.1 h1:xWHPAoe6VkUiI9GAvndJM7s/0MTrmwX3AQiYTr3olf0= github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= +github.com/containerd/containerd v1.5.7 h1:rQyoYtj4KddB3bxG6SAqd4+08gePNyJjRqvOIfV3rkM= +github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= @@ -157,12 +168,15 @@ github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJ github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/stargz-snapshotter/estargz v0.4.1 h1:5e7heayhB7CcgdTkqfZqrNaNv15gABwr3Q2jBTbLlt4= +github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM= github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= -github.com/containerd/ttrpc v1.0.2 
h1:2/O3oTZN36q2xRolk0a2WWGgh7/Vf/liElg5hFYLX9U= github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.1.0 h1:GbtyLRxb0gOLR0TYQWt3O6B0NvT8tMdorEHqIQo/lWI= +github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= @@ -190,15 +204,19 @@ github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= @@ -212,13 +230,22 @@ github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017 h1:2HQmlpI3yI9deH18Q6xiSOIjXD4sLI55Y/gfpa8/558= +github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod 
h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7 h1:Cvj7S8I4Xpx78KAl6TwTmMHuHlZ/0SM60NUneGJQ7IE= +github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.6.3 h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ= +github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= @@ -250,18 +277,25 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e 
h1:BWhy2j3IXJhjCbC68FptL43tDKIq8FladmaTs3Xs7Z8= github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -284,6 +318,9 @@ github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -297,8 +334,9 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0 h1:LUVKkCeviFUMKqHa4tXIIij/lbhnMbP7Fn5wKdKkRh4= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -308,8 +346,12 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-containerregistry v0.5.1 h1:/+mFTs4AlwsJ/mJe8NDtKb7BxLtbZFpcn8vDsneEkwQ= +github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= 
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= @@ -318,6 +360,7 @@ github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OI github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -329,6 +372,7 @@ github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3i github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -351,10 +395,12 @@ github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -375,13 +421,16 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod 
h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3 h1:jUp75lepDg0phMUJBCmvaeFDldD2N3S1lBuPwUTszio= +github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= @@ -390,8 +439,11 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-shellwords v1.0.6 h1:9Jok5pILi5S1MnDirGVTufYGtksUs/V2BWUP3ZkeUUI= +github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -407,26 +459,33 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= 
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -435,21 +494,26 @@ github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go. 
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= +github.com/opencontainers/runc v1.0.2 h1:opHZMaswlyxz1OuGpBE53Dwe4/xF7EZTY0A2L/FpCOg= +github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d h1:pNa8metDkwZjb9g4T8s+krQ+HRgZAkqnXml+wNir/+s= github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= +github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= @@ -497,6 +561,7 @@ github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/shurcooL/sanitized_anchor_name v1.0.0 
h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= @@ -506,8 +571,9 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= @@ -550,9 +616,11 @@ github.com/urfave/cli v1.22.2 h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852 h1:cPXZWzzG0NllBLdjWoD1nDfaqu98YMv+OneaKc8sPOA= github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae h1:4hwBBUfQCFe3Cym0ZtKyq7L16eZUtYKs+BaHDN6mAns= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= @@ -563,12 +631,15 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= 
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= +go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -621,6 +692,7 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -636,6 +708,7 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -647,13 +720,17 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= 
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210825183410-e898025ed96a h1:bRuuGXV8wwSdGTB+CtJf+FjgO1APK1CoO39T4BN/XBw= +golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -665,10 +742,12 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -686,6 +765,7 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -712,6 +792,7 @@ golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -719,6 +800,7 @@ golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -726,23 +808,31 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210324051608-47abb6519492 h1:Paq34FxTluEPvVyayQqMPgHm+vTOrIifmcYxFBx9TLg= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time 
v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -756,6 +846,7 @@ golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -774,8 +865,13 @@ golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -792,10 +888,12 @@ google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb 
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63 h1:YzfoEYWbODU5Fbt37+h7X16BWQbad7Q4S6gclTKFXM8= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -808,15 +906,19 @@ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miE google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= @@ -861,6 +963,7 @@ k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= k8s.io/client-go v0.20.6/go.mod 
h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= +k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= @@ -869,8 +972,12 @@ k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= @@ -879,6 +986,7 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go b/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go index 408312672..9e0059447 100644 --- a/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go +++ b/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go @@ -7,6 +7,9 @@ import ( // HNSEndpoint represents a network endpoint in HNS type HNSEndpoint = hns.HNSEndpoint +// HNSEndpointStats represents the stats for a network endpoint in HNS +type HNSEndpointStats = hns.EndpointStats + // Namespace represents a Compartment.
type Namespace = hns.Namespace @@ -108,3 +111,8 @@ func GetHNSEndpointByID(endpointID string) (*HNSEndpoint, error) { func GetHNSEndpointByName(endpointName string) (*HNSEndpoint, error) { return hns.GetHNSEndpointByName(endpointName) } + +// GetHNSEndpointStats gets the endpoint stats by ID +func GetHNSEndpointStats(endpointName string) (*HNSEndpointStats, error) { + return hns.GetHNSEndpointStats(endpointName) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go b/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go index 27a62a723..f46af33bb 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go @@ -86,6 +86,12 @@ type Container interface { // container to be terminated by some error condition (including calling // Close). Wait() error + // WaitChannel returns the wait channel of the container + WaitChannel() <-chan struct{} + // WaitError returns the container termination error. + // This function should only be called after the channel in WaitChannel() + // is closed. Otherwise it is not thread safe. + WaitError() error // Modify sends a request to modify container resources Modify(ctx context.Context, config interface{}) error } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go index 7696e4b48..295d4b849 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go @@ -60,7 +60,7 @@ var ( // ErrVmcomputeOperationInvalidState is an error encountered when the compute system is not in a valid state for the requested operation ErrVmcomputeOperationInvalidState = syscall.Errno(0xc0370105) - // ErrProcNotFound is an error encountered when the the process cannot be found + // ErrProcNotFound is an error encountered when a procedure lookup fails. ErrProcNotFound = syscall.Errno(0x7f) // ErrVmcomputeOperationAccessIsDenied is an error which can be encountered when enumerating compute systems in RS1/RS2 @@ -78,6 +78,13 @@ var ( // ErrNotSupported is an error encountered when hcs doesn't support the request ErrPlatformNotSupported = errors.New("unsupported platform request") + + // ErrProcessAlreadyStopped is returned by hcs if the process we're trying to kill has already been stopped. + ErrProcessAlreadyStopped = syscall.Errno(0x8037011f) + + // ErrInvalidHandle is an error that can be encountered when querying the properties of a compute system when the handle to that + // compute system has already been closed.
+ ErrInvalidHandle = syscall.Errno(0x6) ) type ErrorEvent struct { @@ -147,7 +154,7 @@ func (e *HcsError) Error() string { func (e *HcsError) Temporary() bool { err, ok := e.Err.(net.Error) - return ok && err.Temporary() + return ok && err.Temporary() //nolint:staticcheck } func (e *HcsError) Timeout() bool { @@ -186,7 +193,7 @@ func (e *SystemError) Error() string { func (e *SystemError) Temporary() bool { err, ok := e.Err.(net.Error) - return ok && err.Temporary() + return ok && err.Temporary() //nolint:staticcheck } func (e *SystemError) Timeout() bool { @@ -217,7 +224,7 @@ func (e *ProcessError) Error() string { func (e *ProcessError) Temporary() bool { err, ok := e.Err.(net.Error) - return ok && err.Temporary() + return ok && err.Temporary() //nolint:staticcheck } func (e *ProcessError) Timeout() bool { @@ -242,12 +249,19 @@ func makeProcessError(process *Process, op string, err error, events []ErrorEven // IsNotExist checks if an error is caused by the Container or Process not existing. // Note: Currently, ErrElementNotFound can mean that a Process has either // already exited, or does not exist. Both IsAlreadyStopped and IsNotExist -// will currently return true when the error is ErrElementNotFound or ErrProcNotFound. +// will currently return true when the error is ErrElementNotFound. func IsNotExist(err error) bool { err = getInnerError(err) return err == ErrComputeSystemDoesNotExist || - err == ErrElementNotFound || - err == ErrProcNotFound + err == ErrElementNotFound +} + +// IsErrorInvalidHandle checks whether the error is the result of an operation carried +// out on a handle that is invalid/closed. This error popped up while trying to query +// stats on a container in the process of being stopped. +func IsErrorInvalidHandle(err error) bool { + err = getInnerError(err) + return err == ErrInvalidHandle } // IsAlreadyClosed checks if an error is caused by the Container or Process having been @@ -278,12 +292,12 @@ func IsTimeout(err error) bool { // a Container or Process being already stopped. // Note: Currently, ErrElementNotFound can mean that a Process has either // already exited, or does not exist. Both IsAlreadyStopped and IsNotExist -// will currently return true when the error is ErrElementNotFound or ErrProcNotFound. +// will currently return true when the error is ErrElementNotFound. 
func IsAlreadyStopped(err error) bool { err = getInnerError(err) return err == ErrVmcomputeAlreadyStopped || - err == ErrElementNotFound || - err == ErrProcNotFound + err == ErrProcessAlreadyStopped || + err == ErrElementNotFound } // IsNotSupported returns a boolean indicating whether the error is caused by diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go index 8f2034668..f4605922a 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go @@ -3,7 +3,9 @@ package hcs import ( "context" "encoding/json" + "errors" "io" + "os" "sync" "syscall" "time" @@ -16,16 +18,17 @@ import ( // ContainerError is an error encountered in HCS type Process struct { - handleLock sync.RWMutex - handle vmcompute.HcsProcess - processID int - system *System - hasCachedStdio bool - stdioLock sync.Mutex - stdin io.WriteCloser - stdout io.ReadCloser - stderr io.ReadCloser - callbackNumber uintptr + handleLock sync.RWMutex + handle vmcompute.HcsProcess + processID int + system *System + hasCachedStdio bool + stdioLock sync.Mutex + stdin io.WriteCloser + stdout io.ReadCloser + stderr io.ReadCloser + callbackNumber uintptr + killSignalDelivered bool closedWaitOnce sync.Once waitBlock chan struct{} @@ -149,12 +152,45 @@ func (process *Process) Kill(ctx context.Context) (bool, error) { return false, makeProcessError(process, operation, ErrAlreadyClosed, nil) } + if process.killSignalDelivered { + // A kill signal has already been sent to this process. Sending a second + // one offers no real benefit, as processes cannot stop themselves from + // being terminated once a TerminateProcess has been issued. Sending a + // second kill may result in a number of errors (two of which are detailed below) + // which we can avoid handling. + return true, nil + } + resultJSON, err := vmcompute.HcsTerminateProcess(ctx, process.handle) + if err != nil { + // We still need to check these two cases, as processes may still be killed by an + // external actor (human operator, OOM, random script etc). + if errors.Is(err, os.ErrPermission) || IsAlreadyStopped(err) { + // There are two cases where it should be safe to ignore an error returned + // by HcsTerminateProcess. The first one is caused by the fact that + // HcsTerminateProcess ends up calling TerminateProcess in the context + // of a container. According to the TerminateProcess documentation: + // https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-terminateprocess#remarks + // After a process has terminated, a call to TerminateProcess with open + // handles to the process fails with the ERROR_ACCESS_DENIED (5) error code. + // It's safe to ignore this error here. HCS should always have permissions + // to kill processes inside any container. So an ERROR_ACCESS_DENIED + // is unlikely to be anything other than what the ending remarks in the + // documentation state. + // + // The second case is generated by hcs itself, if for any reason HcsTerminateProcess + // is called twice in a very short amount of time. In such cases, hcs may return + // HCS_E_PROCESS_ALREADY_STOPPED.
+ return true, nil + } + } events := processHcsResult(ctx, resultJSON) delivered, err := process.processSignalResult(ctx, err) if err != nil { err = makeProcessError(process, operation, err, events) } + + process.killSignalDelivered = delivered return delivered, err } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go index bcfeb34d5..70884aad7 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go @@ -27,4 +27,10 @@ type Attachment struct { CaptureIoAttributionContext bool `json:"CaptureIoAttributionContext,omitempty"` ReadOnly bool `json:"ReadOnly,omitempty"` + + SupportCompressedVolumes bool `json:"SupportCompressedVolumes,omitempty"` + + AlwaysAllowSparseFiles bool `json:"AlwaysAllowSparseFiles,omitempty"` + + ExtensibleVirtualDiskType string `json:"ExtensibleVirtualDiskType,omitempty"` } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go index 4fb231076..39a54432c 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go @@ -31,4 +31,6 @@ type Container struct { RegistryChanges *RegistryChanges `json:"RegistryChanges,omitempty"` AssignedDevices []Device `json:"AssignedDevices,omitempty"` + + AdditionalDeviceNamespace *ContainerDefinitionDevice `json:"AdditionalDeviceNamespace,omitempty"` } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go index f1a28cd38..0be0475d4 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go @@ -14,5 +14,5 @@ type CpuGroupConfig struct { Affinity *CpuGroupAffinity `json:"Affinity,omitempty"` GroupProperties []CpuGroupProperty `json:"GroupProperties,omitempty"` // Hypervisor CPU group IDs exposed to clients - HypervisorGroupId int32 `json:"HypervisorGroupId,omitempty"` + HypervisorGroupId uint64 `json:"HypervisorGroupId,omitempty"` } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go index 107caddad..31c4538af 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go @@ -12,9 +12,9 @@ package hcsschema type DeviceType string const ( - ClassGUID DeviceType = "ClassGuid" - DeviceInstance DeviceType = "DeviceInstance" - GPUMirror DeviceType = "GpuMirror" + ClassGUID DeviceType = "ClassGuid" + DeviceInstanceID DeviceType = "DeviceInstance" + GPUMirror DeviceType = "GpuMirror" ) type Device struct { @@ -22,6 +22,6 @@ type Device struct { Type DeviceType `json:"Type,omitempty"` // The interface class guid of the device interfaces to assign to the container. Only used when Type is ClassGuid. InterfaceClassGuid string `json:"InterfaceClassGuid,omitempty"` - // The location path of the device to assign to the container. Only used when Type is DeviceInstance. + // The location path of the device to assign to the container. Only used when Type is DeviceInstanceID. 
LocationPath string `json:"LocationPath,omitempty"` } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_container_definition_device.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_container_definition_device.go new file mode 100644 index 000000000..8dbe40b3b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_container_definition_device.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerDefinitionDevice struct { + DeviceExtension []DeviceExtension `json:"device_extension,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_category.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_category.go new file mode 100644 index 000000000..8fe89f927 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_category.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DeviceCategory struct { + Name string `json:"name,omitempty"` + InterfaceClass []InterfaceClass `json:"interface_class,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_extension.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_extension.go new file mode 100644 index 000000000..a62568d89 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_extension.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DeviceExtension struct { + DeviceCategory *DeviceCategory `json:"device_category,omitempty"` + Namespace *DeviceExtensionNamespace `json:"namespace,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_instance.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_instance.go new file mode 100644 index 000000000..a7410febd --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_instance.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DeviceInstance struct { + Id string `json:"id,omitempty"` + LocationPath string `json:"location_path,omitempty"` + PortName string `json:"port_name,omitempty"` + InterfaceClass []InterfaceClass `json:"interface_class,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_namespace.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_namespace.go new file mode 100644 index 000000000..355364064 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_namespace.go @@ -0,0 +1,16 @@ 
+/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DeviceNamespace struct { + RequiresDriverstore bool `json:"requires_driverstore,omitempty"` + DeviceCategory []DeviceCategory `json:"device_category,omitempty"` + DeviceInstance []DeviceInstance `json:"device_instance,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_interface_class.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_interface_class.go new file mode 100644 index 000000000..7be98b541 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_interface_class.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type InterfaceClass struct { + Type_ string `json:"type,omitempty"` + Identifier string `json:"identifier,omitempty"` + Recurse bool `json:"recurse,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_namespace.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_namespace.go new file mode 100644 index 000000000..3ab9cf1ec --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_namespace.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DeviceExtensionNamespace struct { + Ob *ObjectNamespace `json:"ob,omitempty"` + Device *DeviceNamespace `json:"device,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_directory.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_directory.go new file mode 100644 index 000000000..d2f51b3b5 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_directory.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ObjectDirectory struct { + Name string `json:"name,omitempty"` + Clonesd string `json:"clonesd,omitempty"` + Shadow string `json:"shadow,omitempty"` + Symlink []ObjectSymlink `json:"symlink,omitempty"` + Objdir []ObjectDirectory `json:"objdir,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_namespace.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_namespace.go new file mode 100644 index 000000000..47dfb55bf --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_namespace.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ObjectNamespace struct { + Shadow string 
`json:"shadow,omitempty"` + Symlink []ObjectSymlink `json:"symlink,omitempty"` + Objdir []ObjectDirectory `json:"objdir,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_symlink.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_symlink.go new file mode 100644 index 000000000..8867ebe5f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_symlink.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ObjectSymlink struct { + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + Scope string `json:"scope,omitempty"` + Pathtoclone string `json:"pathtoclone,omitempty"` + AccessMask int32 `json:"access_mask,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_mapping.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_mapping.go new file mode 100644 index 000000000..9ef322f61 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_mapping.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualPMemMapping struct { + HostPath string `json:"HostPath,omitempty"` + ImageFormat string `json:"ImageFormat,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go index 75499c967..a76f6b253 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go @@ -4,17 +4,22 @@ import ( "context" "encoding/json" "errors" + "fmt" "strings" "sync" "syscall" + "time" "github.com/Microsoft/hcsshim/internal/cow" "github.com/Microsoft/hcsshim/internal/hcs/schema1" hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" + "github.com/Microsoft/hcsshim/internal/jobobject" "github.com/Microsoft/hcsshim/internal/log" + "github.com/Microsoft/hcsshim/internal/logfields" "github.com/Microsoft/hcsshim/internal/oc" "github.com/Microsoft/hcsshim/internal/timeout" "github.com/Microsoft/hcsshim/internal/vmcompute" + "github.com/sirupsen/logrus" "go.opencensus.io/trace" ) @@ -28,7 +33,8 @@ type System struct { waitBlock chan struct{} waitError error exitError error - os, typ string + os, typ, owner string + startTime time.Time } func newSystem(id string) *System { @@ -38,6 +44,11 @@ func newSystem(id string) *System { } } +// Implementation detail for silo naming, this should NOT be relied upon very heavily. +func siloNameFmt(containerID string) string { + return fmt.Sprintf(`\Container_%s`, containerID) +} + // CreateComputeSystem creates a new compute system with the given configuration but does not start it. 
func CreateComputeSystem(ctx context.Context, id string, hcsDocumentInterface interface{}) (_ *System, err error) { operation := "hcs::CreateComputeSystem" @@ -127,6 +138,7 @@ func (computeSystem *System) getCachedProperties(ctx context.Context) error { } computeSystem.typ = strings.ToLower(props.SystemType) computeSystem.os = strings.ToLower(props.RuntimeOSType) + computeSystem.owner = strings.ToLower(props.Owner) if computeSystem.os == "" && computeSystem.typ == "container" { // Pre-RS5 HCS did not return the OS, but it only supported containers // that ran Windows. @@ -195,7 +207,7 @@ func (computeSystem *System) Start(ctx context.Context) (err error) { if err != nil { return makeSystemError(computeSystem, operation, err, events) } - + computeSystem.startTime = time.Now() return nil } @@ -275,11 +287,19 @@ func (computeSystem *System) waitBackground() { oc.SetSpanStatus(span, err) } +func (computeSystem *System) WaitChannel() <-chan struct{} { + return computeSystem.waitBlock +} + +func (computeSystem *System) WaitError() error { + return computeSystem.waitError +} + // Wait synchronously waits for the compute system to shut down or terminate. If // the compute system has already exited, this returns the previous error (if any). func (computeSystem *System) Wait() error { - <-computeSystem.waitBlock - return computeSystem.waitError + <-computeSystem.WaitChannel() + return computeSystem.WaitError() } // ExitError returns an error describing the reason the compute system terminated. @@ -324,11 +344,115 @@ func (computeSystem *System) Properties(ctx context.Context, types ...schema1.Pr return properties, nil } -// PropertiesV2 returns the requested container properties targeting a V2 schema container. -func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType) (*hcsschema.Properties, error) { - computeSystem.handleLock.RLock() - defer computeSystem.handleLock.RUnlock() +// queryInProc handles querying for container properties without reaching out to HCS. `props` +// will be updated to contain any data returned from the queries present in `types`. If any properties +// fail to be queried, they will be tallied up and returned as the first return value. Failures on +// query are NOT considered errors; the only failure case for this method is if the container's job object +// cannot be opened. +func (computeSystem *System) queryInProc(ctx context.Context, props *hcsschema.Properties, types []hcsschema.PropertyType) ([]hcsschema.PropertyType, error) { + // In the future we can make use of some new functionality in the HCS that allows you + // to pass a job object for HCS to use for the container. Currently, the only way we'll + // be able to open the job/silo is if we're running as SYSTEM. + jobOptions := &jobobject.Options{ + UseNTVariant: true, + Name: siloNameFmt(computeSystem.id), + } + job, err := jobobject.Open(ctx, jobOptions) + if err != nil { + return nil, err + } + defer job.Close() + + var fallbackQueryTypes []hcsschema.PropertyType + for _, propType := range types { + switch propType { + case hcsschema.PTStatistics: + // Handle a bad caller asking for the same type twice. No use in re-querying if this is + // filled in already.
+ if props.Statistics == nil { + props.Statistics, err = computeSystem.statisticsInProc(job) + if err != nil { + log.G(ctx).WithError(err).Warn("failed to get statistics in-proc") + + fallbackQueryTypes = append(fallbackQueryTypes, propType) + } + } + default: + fallbackQueryTypes = append(fallbackQueryTypes, propType) + } + } + + return fallbackQueryTypes, nil +} + +// statisticsInProc emulates what HCS does to grab statistics for a given container with a small +// change to make grabbing the private working set total much more efficient. +func (computeSystem *System) statisticsInProc(job *jobobject.JobObject) (*hcsschema.Statistics, error) { + // Start timestamp for these stats before we grab them to match HCS + timestamp := time.Now() + + memInfo, err := job.QueryMemoryStats() + if err != nil { + return nil, err + } + + processorInfo, err := job.QueryProcessorStats() + if err != nil { + return nil, err + } + + storageInfo, err := job.QueryStorageStats() + if err != nil { + return nil, err + } + + // This calculates the private working set more efficiently than HCS does. HCS calls NtQuerySystemInformation + // with the class SystemProcessInformation which returns an array containing system information for *every* + // process running on the machine. They then grab the pids that are running in the container and filter down + // the entries in the array to only what's running in that silo and start tallying up the total. This scales + // poorly, as performance degrades with the number of processes running on the machine overall, not + // just in the container. All of the additional information besides the WorkingSetPrivateSize field is ignored + // as well, which is wasted work to fetch. + // + // HCS only lets you grab statistics in an all-or-nothing fashion, so we can't just grab the private + // working set ourselves and ask for everything else separately. The optimization we can make here is + // to open the silo ourselves and do the same queries for the rest of the info, as well as calculating + // the private working set in a more efficient manner by: + // + // 1. Find the pids running in the silo + // 2. Get a process handle for every process (only need PROCESS_QUERY_LIMITED_INFORMATION access) + // 3. Call NtQueryInformationProcess on each process with the class ProcessVmCounters + // 4. Tally up the total using the field PrivateWorkingSetSize in VM_COUNTERS_EX2.
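+ //
+ // Illustrative arithmetic for step 4: with three processes in the silo whose
+ // PrivateWorkingSetSize values are 4 MiB, 12 MiB and 8 MiB, the query below
+ // reports 24 MiB, at a cost that scales with the number of processes inside
+ // the silo rather than with every process on the host.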
+ privateWorkingSet, err := job.QueryPrivateWorkingSet() + if err != nil { + return nil, err + } + + return &hcsschema.Statistics{ + Timestamp: timestamp, + ContainerStartTime: computeSystem.startTime, + Uptime100ns: uint64(time.Since(computeSystem.startTime).Nanoseconds()) / 100, + Memory: &hcsschema.MemoryStats{ + MemoryUsageCommitBytes: memInfo.JobMemory, + MemoryUsageCommitPeakBytes: memInfo.PeakJobMemoryUsed, + MemoryUsagePrivateWorkingSetBytes: privateWorkingSet, + }, + Processor: &hcsschema.ProcessorStats{ + RuntimeKernel100ns: uint64(processorInfo.TotalKernelTime), + RuntimeUser100ns: uint64(processorInfo.TotalUserTime), + TotalRuntime100ns: uint64(processorInfo.TotalKernelTime + processorInfo.TotalUserTime), + }, + Storage: &hcsschema.StorageStats{ + ReadCountNormalized: uint64(storageInfo.ReadStats.IoCount), + ReadSizeBytes: storageInfo.ReadStats.TotalSize, + WriteCountNormalized: uint64(storageInfo.WriteStats.IoCount), + WriteSizeBytes: storageInfo.WriteStats.TotalSize, + }, + }, nil +} + +// hcsPropertiesV2Query is a helper to make an HcsGetComputeSystemProperties call using the V2 schema property types. +func (computeSystem *System) hcsPropertiesV2Query(ctx context.Context, types []hcsschema.PropertyType) (*hcsschema.Properties, error) { operation := "hcs::System::PropertiesV2" queryBytes, err := json.Marshal(hcsschema.PropertyQuery{PropertyTypes: types}) @@ -345,12 +469,66 @@ func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschem if propertiesJSON == "" { return nil, ErrUnexpectedValue } - properties := &hcsschema.Properties{} - if err := json.Unmarshal([]byte(propertiesJSON), properties); err != nil { + props := &hcsschema.Properties{} + if err := json.Unmarshal([]byte(propertiesJSON), props); err != nil { return nil, makeSystemError(computeSystem, operation, err, nil) } - return properties, nil + return props, nil +} + +// PropertiesV2 returns the requested compute system's properties targeting a V2 schema compute system. +func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType) (_ *hcsschema.Properties, err error) { + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + // Let HCS tally up the total for VM-based queries instead of querying ourselves. + if computeSystem.typ != "container" { + return computeSystem.hcsPropertiesV2Query(ctx, types) + } + + // Define a starter Properties struct with the default fields returned from every + // query. Owner is only returned from Statistics but it's harmless to include. + properties := &hcsschema.Properties{ + Id: computeSystem.id, + SystemType: computeSystem.typ, + RuntimeOsType: computeSystem.os, + Owner: computeSystem.owner, + } + + logEntry := log.G(ctx) + // First, let's try to query ourselves without reaching out to HCS. If any of the queries fail, + // we'll take note and fall back to querying HCS for the failed types.
+ fallbackTypes, err := computeSystem.queryInProc(ctx, properties, types) + if err == nil && len(fallbackTypes) == 0 { + return properties, nil + } else if err != nil { + logEntry = logEntry.WithError(fmt.Errorf("failed to query compute system properties in-proc: %w", err)) + fallbackTypes = types + } + + logEntry.WithFields(logrus.Fields{ + logfields.ContainerID: computeSystem.id, + "propertyTypes": fallbackTypes, + }).Info("falling back to HCS for property type queries") + + hcsProperties, err := computeSystem.hcsPropertiesV2Query(ctx, fallbackTypes) + if err != nil { + return nil, err + } + + // Now add in anything that we might have successfully queried in process. + if properties.Statistics != nil { + hcsProperties.Statistics = properties.Statistics + hcsProperties.Owner = properties.Owner + } + + // For future support for querying processlist in-proc as well. + if properties.ProcessList != nil { + hcsProperties.ProcessList = properties.ProcessList + } + + return hcsProperties, nil } // Pause pauses the execution of the computeSystem. This feature is not enabled in TP5. diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go index b36315a39..7cf954c7b 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go @@ -20,6 +20,7 @@ type HNSEndpoint struct { IPv6Address net.IP `json:",omitempty"` DNSSuffix string `json:",omitempty"` DNSServerList string `json:",omitempty"` + DNSDomain string `json:",omitempty"` GatewayAddress string `json:",omitempty"` GatewayAddressV6 string `json:",omitempty"` EnableInternalDNS bool `json:",omitempty"` @@ -30,6 +31,7 @@ type HNSEndpoint struct { EnableLowMetric bool `json:",omitempty"` Namespace *Namespace `json:",omitempty"` EncapOverhead uint16 `json:",omitempty"` + SharedContainers []string `json:",omitempty"` } //SystemType represents the type of the system on which actions are done @@ -57,6 +59,18 @@ type EndpointResquestResponse struct { Error string } +// EndpointStats is the object that has stats for a given endpoint +type EndpointStats struct { + BytesReceived uint64 `json:"BytesReceived"` + BytesSent uint64 `json:"BytesSent"` + DroppedPacketsIncoming uint64 `json:"DroppedPacketsIncoming"` + DroppedPacketsOutgoing uint64 `json:"DroppedPacketsOutgoing"` + EndpointID string `json:"EndpointId"` + InstanceID string `json:"InstanceId"` + PacketsReceived uint64 `json:"PacketsReceived"` + PacketsSent uint64 `json:"PacketsSent"` +} + // HNSEndpointRequest makes a HNS call to modify/query a network endpoint func HNSEndpointRequest(method, path, request string) (*HNSEndpoint, error) { endpoint := &HNSEndpoint{} @@ -79,11 +93,27 @@ func HNSListEndpointRequest() ([]HNSEndpoint, error) { return endpoint, nil } +// hnsEndpointStatsRequest makes an HNS call to query the stats for a given endpoint ID +func hnsEndpointStatsRequest(id string) (*EndpointStats, error) { + var stats EndpointStats + err := hnsCall("GET", "/endpointstats/"+id, "", &stats) + if err != nil { + return nil, err + } + + return &stats, nil +} + // GetHNSEndpointByID gets the Endpoint by ID func GetHNSEndpointByID(endpointID string) (*HNSEndpoint, error) { return HNSEndpointRequest("GET", endpointID, "") } +// GetHNSEndpointStats gets the stats for an Endpoint by ID +func GetHNSEndpointStats(endpointID string) (*EndpointStats, error) { + return hnsEndpointStatsRequest(endpointID) +} + // GetHNSEndpointByName gets the endpoint
filtered by Name func GetHNSEndpointByName(endpointName string) (*HNSEndpoint, error) { hnsResponse, err := HNSListEndpointRequest() diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go index 6765aaead..84b368218 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go @@ -21,10 +21,11 @@ const ( ) type NatPolicy struct { - Type PolicyType `json:"Type"` - Protocol string - InternalPort uint16 - ExternalPort uint16 + Type PolicyType `json:"Type"` + Protocol string `json:",omitempty"` + InternalPort uint16 `json:",omitempty"` + ExternalPort uint16 `json:",omitempty"` + ExternalPortReserved bool `json:",omitempty"` } type QosPolicy struct { @@ -88,20 +89,20 @@ const ( type ACLPolicy struct { Type PolicyType `json:"Type"` Id string `json:"Id,omitempty"` - Protocol uint16 - Protocols string `json:"Protocols,omitempty"` - InternalPort uint16 + Protocol uint16 `json:",omitempty"` + Protocols string `json:"Protocols,omitempty"` + InternalPort uint16 `json:",omitempty"` Action ActionType Direction DirectionType - LocalAddresses string - RemoteAddresses string - LocalPorts string `json:"LocalPorts,omitempty"` - LocalPort uint16 - RemotePorts string `json:"RemotePorts,omitempty"` - RemotePort uint16 - RuleType RuleType `json:"RuleType,omitempty"` - Priority uint16 - ServiceName string + LocalAddresses string `json:",omitempty"` + RemoteAddresses string `json:",omitempty"` + LocalPorts string `json:"LocalPorts,omitempty"` + LocalPort uint16 `json:",omitempty"` + RemotePorts string `json:"RemotePorts,omitempty"` + RemotePort uint16 `json:",omitempty"` + RuleType RuleType `json:"RuleType,omitempty"` + Priority uint16 `json:",omitempty"` + ServiceName string `json:",omitempty"` } type Policy struct { diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go new file mode 100644 index 000000000..5d6acd69e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go @@ -0,0 +1,111 @@ +package jobobject + +import ( + "context" + "fmt" + "sync" + "unsafe" + + "github.com/Microsoft/hcsshim/internal/log" + "github.com/Microsoft/hcsshim/internal/queue" + "github.com/Microsoft/hcsshim/internal/winapi" + "github.com/sirupsen/logrus" + "golang.org/x/sys/windows" +) + +var ( + ioInitOnce sync.Once + initIOErr error + // Global iocp handle that will be re-used for every job object + ioCompletionPort windows.Handle + // Mapping of job handle to queue to place notifications in. + jobMap sync.Map +) + +// MsgAllProcessesExited is a type representing a message that every process in a job has exited. +type MsgAllProcessesExited struct{} + +// MsgUnimplemented represents a message that we are aware of, but that isn't implemented currently. +// This should not be treated as an error. +type MsgUnimplemented struct{} + +// pollIOCP polls the io completion port forever. 
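+// Messages parsed here are routed to the owning job's queue and are consumed
+// through (*JobObject).PollNotification; see the jobobject package below for
+// an illustrative consumption flow.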
+func pollIOCP(ctx context.Context, iocpHandle windows.Handle) { + var ( + overlapped uintptr + code uint32 + key uintptr + ) + + for { + err := windows.GetQueuedCompletionStatus(iocpHandle, &code, &key, (**windows.Overlapped)(unsafe.Pointer(&overlapped)), windows.INFINITE) + if err != nil { + log.G(ctx).WithError(err).Error("failed to poll for job object message") + continue + } + if val, ok := jobMap.Load(key); ok { + msq, ok := val.(*queue.MessageQueue) + if !ok { + log.G(ctx).WithField("value", msq).Warn("encountered non queue type in job map") + continue + } + notification, err := parseMessage(code, overlapped) + if err != nil { + log.G(ctx).WithFields(logrus.Fields{ + "code": code, + "overlapped": overlapped, + }).Warn("failed to parse job object message") + continue + } + if err := msq.Enqueue(notification); err == queue.ErrQueueClosed { + // Write will only return an error when the queue is closed. + // The only time a queue would ever be closed is when we call `Close` on + // the job it belongs to which also removes it from the jobMap, so something + // went wrong here. We can't return as this is reading messages for all jobs + // so just log it and move on. + log.G(ctx).WithFields(logrus.Fields{ + "code": code, + "overlapped": overlapped, + }).Warn("tried to write to a closed queue") + continue + } + } else { + log.G(ctx).Warn("received a message for a job not present in the mapping") + } + } +} + +func parseMessage(code uint32, overlapped uintptr) (interface{}, error) { + // Check code and parse out relevant information related to that notification + // that we care about. For now all we handle is the message that all processes + // in the job have exited. + switch code { + case winapi.JOB_OBJECT_MSG_ACTIVE_PROCESS_ZERO: + return MsgAllProcessesExited{}, nil + // Other messages for completeness and a check to make sure that if we fall + // into the default case that this is a code we don't know how to handle. + case winapi.JOB_OBJECT_MSG_END_OF_JOB_TIME: + case winapi.JOB_OBJECT_MSG_END_OF_PROCESS_TIME: + case winapi.JOB_OBJECT_MSG_ACTIVE_PROCESS_LIMIT: + case winapi.JOB_OBJECT_MSG_NEW_PROCESS: + case winapi.JOB_OBJECT_MSG_EXIT_PROCESS: + case winapi.JOB_OBJECT_MSG_ABNORMAL_EXIT_PROCESS: + case winapi.JOB_OBJECT_MSG_PROCESS_MEMORY_LIMIT: + case winapi.JOB_OBJECT_MSG_JOB_MEMORY_LIMIT: + case winapi.JOB_OBJECT_MSG_NOTIFICATION_LIMIT: + default: + return nil, fmt.Errorf("unknown job notification type: %d", code) + } + return MsgUnimplemented{}, nil +} + +// Assigns an IO completion port to get notified of events for the registered job +// object. +func attachIOCP(job windows.Handle, iocp windows.Handle) error { + info := winapi.JOBOBJECT_ASSOCIATE_COMPLETION_PORT{ + CompletionKey: job, + CompletionPort: iocp, + } + _, err := windows.SetInformationJobObject(job, windows.JobObjectAssociateCompletionPortInformation, uintptr(unsafe.Pointer(&info)), uint32(unsafe.Sizeof(info))) + return err +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go new file mode 100644 index 000000000..c9fdd921a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go @@ -0,0 +1,538 @@ +package jobobject + +import ( + "context" + "errors" + "fmt" + "sync" + "unsafe" + + "github.com/Microsoft/hcsshim/internal/queue" + "github.com/Microsoft/hcsshim/internal/winapi" + "golang.org/x/sys/windows" +) + +// This file provides higher level constructs for the win32 job object API. 
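+//
+// An illustrative flow (error handling elided):
+//
+//	job, _ := Create(ctx, &Options{Notifications: true})
+//	_ = job.Assign(pid)
+//	msg, _ := job.PollNotification() // e.g. MsgAllProcessesExited
+//	_ = job.Close()
+//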
+// Most of the core creation and management functions are already present in "golang.org/x/sys/windows" +// (CreateJobObject, AssignProcessToJobObject, etc.) as well as most of the limit information +// structs and associated limit flags. Whatever is not present from the job object API +// in golang.org/x/sys/windows is located in /internal/winapi. +// +// https://docs.microsoft.com/en-us/windows/win32/procthread/job-objects + +// JobObject is a high-level wrapper around a Windows job object. Holds a handle to +// the job, a queue to receive iocp notifications about the lifecycle +// of the job and a mutex for synchronized handle access. +type JobObject struct { + handle windows.Handle + mq *queue.MessageQueue + handleLock sync.RWMutex +} + +// JobLimits represents the resource constraints that can be applied to a job object. +type JobLimits struct { + CPULimit uint32 + CPUWeight uint32 + MemoryLimitInBytes uint64 + MaxIOPS int64 + MaxBandwidth int64 +} + +type CPURateControlType uint32 + +const ( + WeightBased CPURateControlType = iota + RateBased +) + +// Processor resource controls +const ( + cpuLimitMin = 1 + cpuLimitMax = 10000 + cpuWeightMin = 1 + cpuWeightMax = 9 +) + +var ( + ErrAlreadyClosed = errors.New("the handle has already been closed") + ErrNotRegistered = errors.New("job is not registered to receive notifications") +) + +// Options represents the set of configurable options when making or opening a job object. +type Options struct { + // `Name` specifies the name of the job object if a named job object is desired. + Name string + // `Notifications` specifies if the job will be registered to receive notifications. + // Defaults to false. + Notifications bool + // `UseNTVariant` specifies if we should use the `Nt` variant of Open/CreateJobObject. + // Defaults to false. + UseNTVariant bool + // `EnableIOTracking` enables tracking I/O statistics on the job object. More specifically, this + // calls SetInformationJobObject with the JobObjectIoAttribution class. + EnableIOTracking bool +} + +// Create creates a job object. +// +// If options.Name is an empty string, the job will not be assigned a name. +// +// If options.Notifications is not enabled, `PollNotification` will return immediately with error `ErrNotRegistered`. +// +// If `options` is nil, use default option values. +// +// Returns a JobObject structure and an error if there is one. +func Create(ctx context.Context, options *Options) (_ *JobObject, err error) { + if options == nil { + options = &Options{} + } + + var jobName *winapi.UnicodeString + if options.Name != "" { + jobName, err = winapi.NewUnicodeString(options.Name) + if err != nil { + return nil, err + } + } + + var jobHandle windows.Handle + if options.UseNTVariant { + oa := winapi.ObjectAttributes{ + Length: unsafe.Sizeof(winapi.ObjectAttributes{}), + ObjectName: jobName, + Attributes: 0, + } + status := winapi.NtCreateJobObject(&jobHandle, winapi.JOB_OBJECT_ALL_ACCESS, &oa) + if status != 0 { + return nil, winapi.RtlNtStatusToDosError(status) + } + } else { + var jobNameBuf *uint16 + if jobName != nil && jobName.Buffer != nil { + jobNameBuf = jobName.Buffer + } + jobHandle, err = windows.CreateJobObject(nil, jobNameBuf) + if err != nil { + return nil, err + } + } + + defer func() { + if err != nil { + windows.Close(jobHandle) + } + }() + + job := &JobObject{ + handle: jobHandle, + } + + // If the IOCP we'll be using to receive messages for all jobs hasn't been + // created, create it and start polling.
+ if options.Notifications { + mq, err := setupNotifications(ctx, job) + if err != nil { + return nil, err + } + job.mq = mq + } + + if options.EnableIOTracking { + if err := enableIOTracking(jobHandle); err != nil { + return nil, err + } + } + + return job, nil +} + +// Open opens an existing job object with the name provided in `options`. If no name is provided, +// an error is returned, since we need to know which job object to open. +// +// If options.Notifications is false, `PollNotification` will return immediately with error `ErrNotRegistered`. +// +// Returns a JobObject structure and an error if there is one. +func Open(ctx context.Context, options *Options) (_ *JobObject, err error) { + if options == nil || options.Name == "" { + return nil, errors.New("no job object name specified to open") + } + + unicodeJobName, err := winapi.NewUnicodeString(options.Name) + if err != nil { + return nil, err + } + + var jobHandle windows.Handle + if options.UseNTVariant { + oa := winapi.ObjectAttributes{ + Length: unsafe.Sizeof(winapi.ObjectAttributes{}), + ObjectName: unicodeJobName, + Attributes: 0, + } + status := winapi.NtOpenJobObject(&jobHandle, winapi.JOB_OBJECT_ALL_ACCESS, &oa) + if status != 0 { + return nil, winapi.RtlNtStatusToDosError(status) + } + } else { + jobHandle, err = winapi.OpenJobObject(winapi.JOB_OBJECT_ALL_ACCESS, false, unicodeJobName.Buffer) + if err != nil { + return nil, err + } + } + + defer func() { + if err != nil { + windows.Close(jobHandle) + } + }() + + job := &JobObject{ + handle: jobHandle, + } + + // If the IOCP we'll be using to receive messages for all jobs hasn't been + // created, create it and start polling. + if options.Notifications { + mq, err := setupNotifications(ctx, job) + if err != nil { + return nil, err + } + job.mq = mq + } + + return job, nil +} + +// Helper function to set up notifications when creating or opening a job object. +func setupNotifications(ctx context.Context, job *JobObject) (*queue.MessageQueue, error) { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return nil, ErrAlreadyClosed + } + + ioInitOnce.Do(func() { + h, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0xffffffff) + if err != nil { + initIOErr = err + return + } + ioCompletionPort = h + go pollIOCP(ctx, h) + }) + + if initIOErr != nil { + return nil, initIOErr + } + + mq := queue.NewMessageQueue() + jobMap.Store(uintptr(job.handle), mq) + if err := attachIOCP(job.handle, ioCompletionPort); err != nil { + jobMap.Delete(uintptr(job.handle)) + return nil, fmt.Errorf("failed to attach job to IO completion port: %w", err) + } + return mq, nil +} + +// PollNotification will poll for a job object notification. This call should only be made from +// one goroutine per job (ideally in a dedicated goroutine loop) and will block if there is not a notification ready. +// This call will return immediately with error `ErrNotRegistered` if the job was not registered +// to receive notifications during `Create`. Internally, messages will be queued and there +// is no worry of messages being dropped. +func (job *JobObject) PollNotification() (interface{}, error) { + if job.mq == nil { + return nil, ErrNotRegistered + } + return job.mq.Dequeue() +} + +// UpdateProcThreadAttribute updates the passed in ProcThreadAttributeList to contain what is necessary to +// launch a process in a job at creation time. This can be used to avoid having to call Assign() after a process +// has already started running.
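+//
+// An illustrative use, assuming x/sys/windows for the attribute list (error
+// handling elided):
+//
+//	attrList, _ := windows.NewProcThreadAttributeList(1)
+//	_ = job.UpdateProcThreadAttribute(attrList)
+//	// attrList is then passed via the extended startup info when creating the process.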
+func (job *JobObject) UpdateProcThreadAttribute(attrList *windows.ProcThreadAttributeListContainer) error { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return ErrAlreadyClosed + } + + if err := attrList.Update( + winapi.PROC_THREAD_ATTRIBUTE_JOB_LIST, + unsafe.Pointer(&job.handle), + unsafe.Sizeof(job.handle), + ); err != nil { + return fmt.Errorf("failed to update proc thread attributes for job object: %w", err) + } + + return nil +} + +// Close closes the job object handle. +func (job *JobObject) Close() error { + job.handleLock.Lock() + defer job.handleLock.Unlock() + + if job.handle == 0 { + return ErrAlreadyClosed + } + + if err := windows.Close(job.handle); err != nil { + return err + } + + if job.mq != nil { + job.mq.Close() + } + // The handle is now invalid, so if the map entry used to receive notifications for this job still + // exists, remove it so we stop receiving notifications. + if _, ok := jobMap.Load(uintptr(job.handle)); ok { + jobMap.Delete(uintptr(job.handle)) + } + + job.handle = 0 + return nil +} + +// Assign assigns a process to the job object. +func (job *JobObject) Assign(pid uint32) error { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return ErrAlreadyClosed + } + + if pid == 0 { + return errors.New("invalid pid: 0") + } + hProc, err := windows.OpenProcess(winapi.PROCESS_ALL_ACCESS, true, pid) + if err != nil { + return err + } + defer windows.Close(hProc) + return windows.AssignProcessToJobObject(job.handle, hProc) +} + +// Terminate terminates the job, essentially calling TerminateProcess on every process in the +// job. +func (job *JobObject) Terminate(exitCode uint32) error { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + if job.handle == 0 { + return ErrAlreadyClosed + } + return windows.TerminateJobObject(job.handle, exitCode) +} + +// Pids returns all of the process IDs in the job object. +func (job *JobObject) Pids() ([]uint32, error) { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return nil, ErrAlreadyClosed + } + + info := winapi.JOBOBJECT_BASIC_PROCESS_ID_LIST{} + err := winapi.QueryInformationJobObject( + job.handle, + winapi.JobObjectBasicProcessIdList, + unsafe.Pointer(&info), + uint32(unsafe.Sizeof(info)), + nil, + ) + + // This is either the case where there is only one process or no processes in + // the job. Any other case will result in ERROR_MORE_DATA. Check if info.NumberOfProcessIdsInList + // is 1 and just return this, otherwise return an empty slice. + if err == nil { + if info.NumberOfProcessIdsInList == 1 { + return []uint32{uint32(info.ProcessIdList[0])}, nil + } + // Return an empty slice instead of nil to play well with the callers of this function.
+ // Do not return an error if no processes are running inside the job + return []uint32{}, nil + } + + if err != winapi.ERROR_MORE_DATA { + return nil, fmt.Errorf("failed initial query for PIDs in job object: %w", err) + } + + jobBasicProcessIDListSize := unsafe.Sizeof(info) + (unsafe.Sizeof(info.ProcessIdList[0]) * uintptr(info.NumberOfAssignedProcesses-1)) + buf := make([]byte, jobBasicProcessIDListSize) + if err = winapi.QueryInformationJobObject( + job.handle, + winapi.JobObjectBasicProcessIdList, + unsafe.Pointer(&buf[0]), + uint32(len(buf)), + nil, + ); err != nil { + return nil, fmt.Errorf("failed to query for PIDs in job object: %w", err) + } + + bufInfo := (*winapi.JOBOBJECT_BASIC_PROCESS_ID_LIST)(unsafe.Pointer(&buf[0])) + pids := make([]uint32, bufInfo.NumberOfProcessIdsInList) + for i, bufPid := range bufInfo.AllPids() { + pids[i] = uint32(bufPid) + } + return pids, nil +} + +// QueryMemoryStats gets the memory stats for the job object. +func (job *JobObject) QueryMemoryStats() (*winapi.JOBOBJECT_MEMORY_USAGE_INFORMATION, error) { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return nil, ErrAlreadyClosed + } + + info := winapi.JOBOBJECT_MEMORY_USAGE_INFORMATION{} + if err := winapi.QueryInformationJobObject( + job.handle, + winapi.JobObjectMemoryUsageInformation, + unsafe.Pointer(&info), + uint32(unsafe.Sizeof(info)), + nil, + ); err != nil { + return nil, fmt.Errorf("failed to query for job object memory stats: %w", err) + } + return &info, nil +} + +// QueryProcessorStats gets the processor stats for the job object. +func (job *JobObject) QueryProcessorStats() (*winapi.JOBOBJECT_BASIC_ACCOUNTING_INFORMATION, error) { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return nil, ErrAlreadyClosed + } + + info := winapi.JOBOBJECT_BASIC_ACCOUNTING_INFORMATION{} + if err := winapi.QueryInformationJobObject( + job.handle, + winapi.JobObjectBasicAccountingInformation, + unsafe.Pointer(&info), + uint32(unsafe.Sizeof(info)), + nil, + ); err != nil { + return nil, fmt.Errorf("failed to query for job object process stats: %w", err) + } + return &info, nil +} + +// QueryStorageStats gets the storage (I/O) stats for the job object. This call will error +// if either `EnableIOTracking` wasn't set to true on creation of the job, or SetIOTracking() +// hasn't been called since creation of the job. +func (job *JobObject) QueryStorageStats() (*winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION, error) { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return nil, ErrAlreadyClosed + } + + info := winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION{ + ControlFlags: winapi.JOBOBJECT_IO_ATTRIBUTION_CONTROL_ENABLE, + } + if err := winapi.QueryInformationJobObject( + job.handle, + winapi.JobObjectIoAttribution, + unsafe.Pointer(&info), + uint32(unsafe.Sizeof(info)), + nil, + ); err != nil { + return nil, fmt.Errorf("failed to query for job object storage stats: %w", err) + } + return &info, nil +} + +// QueryPrivateWorkingSet returns the private working set size for the job. This is calculated by adding up the +// private working set for every process running in the job. 
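+// Illustratively, the result is the sum of VM_COUNTERS_EX2.PrivateWorkingSetSize
+// over every pid in the job; pids that exit between enumeration and the query
+// simply contribute zero.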
+func (job *JobObject) QueryPrivateWorkingSet() (uint64, error) { + pids, err := job.Pids() + if err != nil { + return 0, err + } + + openAndQueryWorkingSet := func(pid uint32) (uint64, error) { + h, err := windows.OpenProcess(windows.PROCESS_QUERY_LIMITED_INFORMATION, false, pid) + if err != nil { + // Continue to the next pid if OpenProcess fails to return a valid handle. This handles the + // case where one of the pids in the job exited before we could open a handle to it. + return 0, nil + } + defer func() { + _ = windows.Close(h) + }() + // Check if the process is actually running in the job still. There's a small chance + // that the process could have exited and had its pid re-used between grabbing the pids + // in the job and opening the handle to it above. + var inJob int32 + if err := winapi.IsProcessInJob(h, job.handle, &inJob); err != nil { + // This shouldn't fail unless we have incorrect access rights, which we control + // here, so it's best to error out if it does. + return 0, err + } + // Don't report stats for this process as it's not running in the job. This shouldn't be + // an error condition though. + if inJob == 0 { + return 0, nil + } + + var vmCounters winapi.VM_COUNTERS_EX2 + status := winapi.NtQueryInformationProcess( + h, + winapi.ProcessVmCounters, + unsafe.Pointer(&vmCounters), + uint32(unsafe.Sizeof(vmCounters)), + nil, + ) + if !winapi.NTSuccess(status) { + return 0, fmt.Errorf("failed to query information for process: %w", winapi.RtlNtStatusToDosError(status)) + } + return uint64(vmCounters.PrivateWorkingSetSize), nil + } + + var jobWorkingSetSize uint64 + for _, pid := range pids { + workingSet, err := openAndQueryWorkingSet(pid) + if err != nil { + return 0, err + } + jobWorkingSetSize += workingSet + } + + return jobWorkingSetSize, nil +} + +// SetIOTracking enables IO tracking for processes in the job object. +// This enables use of the QueryStorageStats method. +func (job *JobObject) SetIOTracking() error { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return ErrAlreadyClosed + } + + return enableIOTracking(job.handle) +} + +func enableIOTracking(job windows.Handle) error { + info := winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION{ + ControlFlags: winapi.JOBOBJECT_IO_ATTRIBUTION_CONTROL_ENABLE, + } + if _, err := windows.SetInformationJobObject( + job, + winapi.JobObjectIoAttribution, + uintptr(unsafe.Pointer(&info)), + uint32(unsafe.Sizeof(info)), + ); err != nil { + return fmt.Errorf("failed to enable IO tracking on job object: %w", err) + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go new file mode 100644 index 000000000..4efde292c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go @@ -0,0 +1,315 @@ +package jobobject + +import ( + "errors" + "fmt" + "unsafe" + + "github.com/Microsoft/hcsshim/internal/winapi" + "golang.org/x/sys/windows" +) + +const ( + memoryLimitMax uint64 = 0xffffffffffffffff +) + +func isFlagSet(flag, controlFlags uint32) bool { + return (flag & controlFlags) == flag +} + +// SetResourceLimits sets resource limits on the job object (cpu, memory, storage). +func (job *JobObject) SetResourceLimits(limits *JobLimits) error { + // Go through and check what limits were specified and apply them to the job.
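+ // For example (illustrative), JobLimits{CPULimit: 5000, MemoryLimitInBytes: 512 << 20}
+ // caps the job at 50% of total CPU cycles (the rate scale is 1-10000) and
+ // 512 MiB of committed memory.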
+ if limits.MemoryLimitInBytes != 0 { + if err := job.SetMemoryLimit(limits.MemoryLimitInBytes); err != nil { + return fmt.Errorf("failed to set job object memory limit: %w", err) + } + } + + if limits.CPULimit != 0 { + if err := job.SetCPULimit(RateBased, limits.CPULimit); err != nil { + return fmt.Errorf("failed to set job object cpu limit: %w", err) + } + } else if limits.CPUWeight != 0 { + if err := job.SetCPULimit(WeightBased, limits.CPUWeight); err != nil { + return fmt.Errorf("failed to set job object cpu limit: %w", err) + } + } + + if limits.MaxBandwidth != 0 || limits.MaxIOPS != 0 { + if err := job.SetIOLimit(limits.MaxBandwidth, limits.MaxIOPS); err != nil { + return fmt.Errorf("failed to set io limit on job object: %w", err) + } + } + return nil +} + +// SetTerminateOnLastHandleClose sets the job object flag that specifies that the job should terminate +// all processes in the job on the last open handle being closed. +func (job *JobObject) SetTerminateOnLastHandleClose() error { + info, err := job.getExtendedInformation() + if err != nil { + return err + } + info.BasicLimitInformation.LimitFlags |= windows.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE + return job.setExtendedInformation(info) +} + +// SetMemoryLimit sets the memory limit of the job object based on the given `memoryLimitInBytes`. +func (job *JobObject) SetMemoryLimit(memoryLimitInBytes uint64) error { + if memoryLimitInBytes >= memoryLimitMax { + return errors.New("memory limit specified exceeds the max size") + } + + info, err := job.getExtendedInformation() + if err != nil { + return err + } + + info.JobMemoryLimit = uintptr(memoryLimitInBytes) + info.BasicLimitInformation.LimitFlags |= windows.JOB_OBJECT_LIMIT_JOB_MEMORY + return job.setExtendedInformation(info) +} + +// GetMemoryLimit gets the memory limit in bytes of the job object. +func (job *JobObject) GetMemoryLimit() (uint64, error) { + info, err := job.getExtendedInformation() + if err != nil { + return 0, err + } + return uint64(info.JobMemoryLimit), nil +} + +// SetCPULimit sets the CPU limit depending on the specified `CPURateControlType` to +// `rateControlValue` for the job object. +func (job *JobObject) SetCPULimit(rateControlType CPURateControlType, rateControlValue uint32) error { + cpuInfo, err := job.getCPURateControlInformation() + if err != nil { + return err + } + switch rateControlType { + case WeightBased: + if rateControlValue < cpuWeightMin || rateControlValue > cpuWeightMax { + return fmt.Errorf("processor weight value of `%d` is invalid", rateControlValue) + } + cpuInfo.ControlFlags |= winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE | winapi.JOB_OBJECT_CPU_RATE_CONTROL_WEIGHT_BASED + cpuInfo.Value = rateControlValue + case RateBased: + if rateControlValue < cpuLimitMin || rateControlValue > cpuLimitMax { + return fmt.Errorf("processor rate of `%d` is invalid", rateControlValue) + } + cpuInfo.ControlFlags |= winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE | winapi.JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP + cpuInfo.Value = rateControlValue + default: + return errors.New("invalid job object cpu rate control type") + } + return job.setCPURateControlInfo(cpuInfo) +} + +// GetCPULimit gets the cpu limits for the job object. +// `rateControlType` is used to indicate what type of cpu limit to query for. 
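+// For example (illustrative), after SetCPULimit(RateBased, 5000), calling
+// GetCPULimit(RateBased) returns 5000, i.e. a hard cap of 50% on the 1-10000
+// rate scale.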
+func (job *JobObject) GetCPULimit(rateControlType CPURateControlType) (uint32, error) { + info, err := job.getCPURateControlInformation() + if err != nil { + return 0, err + } + + if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE, info.ControlFlags) { + return 0, errors.New("the job does not have cpu rate control enabled") + } + + switch rateControlType { + case WeightBased: + if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_WEIGHT_BASED, info.ControlFlags) { + return 0, errors.New("cannot get cpu weight for job object without cpu weight option set") + } + case RateBased: + if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP, info.ControlFlags) { + return 0, errors.New("cannot get cpu rate hard cap for job object without cpu rate hard cap option set") + } + default: + return 0, errors.New("invalid job object cpu rate control type") + } + return info.Value, nil +} + +// SetCPUAffinity sets the processor affinity for the job object. +// The affinity is passed in as a bitmask. +func (job *JobObject) SetCPUAffinity(affinityBitMask uint64) error { + info, err := job.getExtendedInformation() + if err != nil { + return err + } + info.BasicLimitInformation.LimitFlags |= uint32(windows.JOB_OBJECT_LIMIT_AFFINITY) + info.BasicLimitInformation.Affinity = uintptr(affinityBitMask) + return job.setExtendedInformation(info) +} + +// GetCPUAffinity gets the processor affinity for the job object. +// The returned affinity is a bitmask. +func (job *JobObject) GetCPUAffinity() (uint64, error) { + info, err := job.getExtendedInformation() + if err != nil { + return 0, err + } + return uint64(info.BasicLimitInformation.Affinity), nil +} + +// SetIOLimit sets the IO limits specified on the job object. +func (job *JobObject) SetIOLimit(maxBandwidth, maxIOPS int64) error { + ioInfo, err := job.getIOLimit() + if err != nil { + return err + } + ioInfo.ControlFlags |= winapi.JOB_OBJECT_IO_RATE_CONTROL_ENABLE + if maxBandwidth != 0 { + ioInfo.MaxBandwidth = maxBandwidth + } + if maxIOPS != 0 { + ioInfo.MaxIops = maxIOPS + } + return job.setIORateControlInfo(ioInfo) +} + +// GetIOMaxBandwidthLimit gets the max bandwidth for the job object. +func (job *JobObject) GetIOMaxBandwidthLimit() (int64, error) { + info, err := job.getIOLimit() + if err != nil { + return 0, err + } + return info.MaxBandwidth, nil +} + +// GetIOMaxIopsLimit gets the max iops for the job object. +func (job *JobObject) GetIOMaxIopsLimit() (int64, error) { + info, err := job.getIOLimit() + if err != nil { + return 0, err + } + return info.MaxIops, nil +} + +// Helper function for getting a job object's extended information. +func (job *JobObject) getExtendedInformation() (*windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION, error) { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return nil, ErrAlreadyClosed + } + + info := windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION{} + if err := winapi.QueryInformationJobObject( + job.handle, + windows.JobObjectExtendedLimitInformation, + unsafe.Pointer(&info), + uint32(unsafe.Sizeof(info)), + nil, + ); err != nil { + return nil, fmt.Errorf("query %v returned error: %w", info, err) + } + return &info, nil +} + +// Helper function for getting a job object's CPU rate control information. 
+func (job *JobObject) getCPURateControlInformation() (*winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION, error) { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return nil, ErrAlreadyClosed + } + + info := winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION{} + if err := winapi.QueryInformationJobObject( + job.handle, + windows.JobObjectCpuRateControlInformation, + unsafe.Pointer(&info), + uint32(unsafe.Sizeof(info)), + nil, + ); err != nil { + return nil, fmt.Errorf("query %v returned error: %w", info, err) + } + return &info, nil +} + +// Helper function for setting a job object's extended information. +func (job *JobObject) setExtendedInformation(info *windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION) error { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return ErrAlreadyClosed + } + + if _, err := windows.SetInformationJobObject( + job.handle, + windows.JobObjectExtendedLimitInformation, + uintptr(unsafe.Pointer(info)), + uint32(unsafe.Sizeof(*info)), + ); err != nil { + return fmt.Errorf("failed to set Extended info %v on job object: %w", info, err) + } + return nil +} + +// Helper function for querying job handle for IO limit information. +func (job *JobObject) getIOLimit() (*winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION, error) { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return nil, ErrAlreadyClosed + } + + ioInfo := &winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION{} + var blockCount uint32 = 1 + + if _, err := winapi.QueryIoRateControlInformationJobObject( + job.handle, + nil, + &ioInfo, + &blockCount, + ); err != nil { + return nil, fmt.Errorf("query %v returned error: %w", ioInfo, err) + } + + if !isFlagSet(winapi.JOB_OBJECT_IO_RATE_CONTROL_ENABLE, ioInfo.ControlFlags) { + return nil, fmt.Errorf("query %v cannot get IO limits for job object without IO rate control option set", ioInfo) + } + return ioInfo, nil +} + +// Helper function for setting a job object's IO rate control information. +func (job *JobObject) setIORateControlInfo(ioInfo *winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION) error { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return ErrAlreadyClosed + } + + if _, err := winapi.SetIoRateControlInformationJobObject(job.handle, ioInfo); err != nil { + return fmt.Errorf("failed to set IO limit info %v on job object: %w", ioInfo, err) + } + return nil +} + +// Helper function for setting a job object's CPU rate control information. 
+func (job *JobObject) setCPURateControlInfo(cpuInfo *winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION) error { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return ErrAlreadyClosed + } + if _, err := windows.SetInformationJobObject( + job.handle, + windows.JobObjectCpuRateControlInformation, + uintptr(unsafe.Pointer(cpuInfo)), + uint32(unsafe.Sizeof(*cpuInfo)), + ); err != nil { + return fmt.Errorf("failed to set cpu limit info %v on job object: %w", cpuInfo, err) + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go b/vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go new file mode 100644 index 000000000..4eb9bb9f1 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go @@ -0,0 +1,92 @@ +package queue + +import ( + "errors" + "sync" +) + +var ErrQueueClosed = errors.New("the queue is closed for reading and writing") + +// MessageQueue represents a thread-safe message queue used to write and +// retrieve messages. +type MessageQueue struct { + m *sync.RWMutex + c *sync.Cond + messages []interface{} + closed bool +} + +// NewMessageQueue returns a new MessageQueue. +func NewMessageQueue() *MessageQueue { + m := &sync.RWMutex{} + return &MessageQueue{ + m: m, + c: sync.NewCond(m), + messages: []interface{}{}, + } +} + +// Enqueue writes `msg` to the queue. +func (mq *MessageQueue) Enqueue(msg interface{}) error { + mq.m.Lock() + defer mq.m.Unlock() + + if mq.closed { + return ErrQueueClosed + } + mq.messages = append(mq.messages, msg) + // Signal a waiter that there is now a value available in the queue. + mq.c.Signal() + return nil +} + +// Dequeue will read a value from the queue and remove it. If the queue +// is empty, this will block until the queue is closed or a value gets enqueued. +func (mq *MessageQueue) Dequeue() (interface{}, error) { + mq.m.Lock() + defer mq.m.Unlock() + + for !mq.closed && mq.size() == 0 { + mq.c.Wait() + } + + // We got woken up; check if it's because the queue got closed. + if mq.closed { + return nil, ErrQueueClosed + } + + val := mq.messages[0] + mq.messages[0] = nil + mq.messages = mq.messages[1:] + return val, nil +} + +// Size returns the size of the queue. +func (mq *MessageQueue) Size() int { + mq.m.RLock() + defer mq.m.RUnlock() + return mq.size() +} + +// Non-exported size helper for checking whether the queue is empty from functions that already hold the lock. +func (mq *MessageQueue) size() int { + return len(mq.messages) +} + +// Close closes the queue for future writes or reads. Any attempts to read or write from the +// queue after close will return ErrQueueClosed. This is safe to call multiple times. +func (mq *MessageQueue) Close() { + mq.m.Lock() + defer mq.m.Unlock() + + // Already closed, noop + if mq.closed { + return + } + + mq.messages = nil + mq.closed = true + // If there's anybody currently waiting on a value from Dequeue, we need to + // broadcast so the read(s) can return ErrQueueClosed.
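+ // Broadcast rather than Signal: multiple readers may be blocked in Dequeue,
+ // and each of them must wake and observe the closed state.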
+ mq.c.Broadcast() +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go index ff81ac2c1..5debe974d 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go @@ -21,7 +21,7 @@ func ActivateLayer(ctx context.Context, path string) (err error) { err = activateLayer(&stdDriverInfo, path) if err != nil { - return hcserror.New(err, title+" - failed", "") + return hcserror.New(err, title, "") } return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go index ffee31ab1..480aee872 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go @@ -21,7 +21,7 @@ func CreateLayer(ctx context.Context, path, parent string) (err error) { err = createLayer(&stdDriverInfo, path, parent) if err != nil { - return hcserror.New(err, title+" - failed", "") + return hcserror.New(err, title, "") } return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go index 5a3809ae2..131aa94f1 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go @@ -28,7 +28,7 @@ func CreateScratchLayer(ctx context.Context, path string, parentLayerPaths []str err = createSandboxLayer(&stdDriverInfo, path, 0, layers) if err != nil { - return hcserror.New(err, title+" - failed", "") + return hcserror.New(err, title, "") } return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go index 787054e79..424467ac3 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go @@ -19,7 +19,7 @@ func DestroyLayer(ctx context.Context, path string) (err error) { err = destroyLayer(&stdDriverInfo, path) if err != nil { - return hcserror.New(err, title+" - failed", "") + return hcserror.New(err, title, "") } return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go index 22f7605be..035c9041e 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go @@ -25,7 +25,7 @@ func ExpandScratchSize(ctx context.Context, path string, size uint64) (err error err = expandSandboxSize(&stdDriverInfo, path, size) if err != nil { - return hcserror.New(err, title+" - failed", "") + return hcserror.New(err, title, "") } // Manually expand the volume now in order to work around bugs in 19H1 and diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go index 09f0de1a4..97b27eb7d 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go @@ -35,7 +35,7 @@ func ExportLayer(ctx context.Context, path string, exportFolderPath string, pare err = 
exportLayer(&stdDriverInfo, path, exportFolderPath, layers) if err != nil { - return hcserror.New(err, title+" - failed", "") + return hcserror.New(err, title, "") } return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go index 4d22d0ecf..8d213f587 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go @@ -27,7 +27,7 @@ func GetLayerMountPath(ctx context.Context, path string) (_ string, err error) { log.G(ctx).Debug("Calling proc (1)") err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, nil) if err != nil { - return "", hcserror.New(err, title+" - failed", "(first call)") + return "", hcserror.New(err, title, "(first call)") } // Allocate a mount path of the returned length. @@ -41,7 +41,7 @@ func GetLayerMountPath(ctx context.Context, path string) (_ string, err error) { log.G(ctx).Debug("Calling proc (2)") err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, &mountPathp[0]) if err != nil { - return "", hcserror.New(err, title+" - failed", "(second call)") + return "", hcserror.New(err, title, "(second call)") } mountPath := syscall.UTF16ToString(mountPathp[0:]) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go index bcc8fbd42..ae1fff840 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go @@ -21,7 +21,7 @@ func GetSharedBaseImages(ctx context.Context) (_ string, err error) { var buffer *uint16 err = getBaseImages(&buffer) if err != nil { - return "", hcserror.New(err, title+" - failed", "") + return "", hcserror.New(err, title, "") } imageData := interop.ConvertAndFreeCoTaskMemString(buffer) span.AddAttributes(trace.StringAttribute("imageData", imageData)) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go index 3eaca2780..4b282fef9 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go @@ -20,7 +20,7 @@ func GrantVmAccess(ctx context.Context, vmid string, filepath string) (err error err = grantVmAccess(vmid, filepath) if err != nil { - return hcserror.New(err, title+" - failed", "") + return hcserror.New(err, title, "") } return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go index b3c150d66..687550f0b 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go @@ -36,7 +36,7 @@ func ImportLayer(ctx context.Context, path string, importFolderPath string, pare err = importLayer(&stdDriverInfo, path, importFolderPath, layers) if err != nil { - return hcserror.New(err, title+" - failed", "") + return hcserror.New(err, title, "") } return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go index c6999973c..01e672339 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go +++ 
b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go @@ -21,7 +21,7 @@ func LayerExists(ctx context.Context, path string) (_ bool, err error) { var exists uint32 err = layerExists(&stdDriverInfo, path, &exists) if err != nil { - return false, hcserror.New(err, title+" - failed", "") + return false, hcserror.New(err, title, "") } span.AddAttributes(trace.BoolAttribute("layer-exists", exists != 0)) return exists != 0, nil diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go index 83ba72cfa..b7f3064f2 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go @@ -76,7 +76,7 @@ func readTombstones(path string) (map[string]([]string), error) { defer tf.Close() s := bufio.NewScanner(tf) if !s.Scan() || s.Text() != "\xef\xbb\xbfVersion 1.0" { - return nil, errors.New("Invalid tombstones file") + return nil, errors.New("invalid tombstones file") } ts := make(map[string]([]string)) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go index bcf39c6b8..09950297c 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go @@ -17,12 +17,12 @@ func NameToGuid(ctx context.Context, name string) (_ guid.GUID, err error) { ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck defer span.End() defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("name", name)) + span.AddAttributes(trace.StringAttribute("objectName", name)) var id guid.GUID err = nameToGuid(name, &id) if err != nil { - return guid.GUID{}, hcserror.New(err, title+" - failed", "") + return guid.GUID{}, hcserror.New(err, title, "") } span.AddAttributes(trace.StringAttribute("guid", id.String())) return id, nil diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go index 55f7730d0..90129faef 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go @@ -38,7 +38,7 @@ func PrepareLayer(ctx context.Context, path string, parentLayerPaths []string) ( defer prepareLayerLock.Unlock() err = prepareLayer(&stdDriverInfo, path, layers) if err != nil { - return hcserror.New(err, title+" - failed", "") + return hcserror.New(err, title, "") } return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go index 79fb98678..71b130c52 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go @@ -19,7 +19,7 @@ func UnprepareLayer(ctx context.Context, path string) (err error) { err = unprepareLayer(&stdDriverInfo, path) if err != nil { - return hcserror.New(err, title+" - failed", "") + return hcserror.New(err, title, "") } return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go new file mode 100644 index 000000000..def952541 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go @@ -0,0 +1,44 @@ +package winapi + 
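+
+// This file wraps the ConPTY ("pseudo console") API exposed by kernel32.dll.
+// A rough usage sketch of the exported wrappers defined below (hIn and hOut
+// are pipe handles supplied by the caller, not anything defined in this
+// package):
+//
+//	var hpcon windows.Handle
+//	size := windows.Coord{X: 80, Y: 25}
+//	if err := CreatePseudoConsole(size, hIn, hOut, 0, &hpcon); err != nil {
+//		// handle error
+//	}
+//	defer ClosePseudoConsole(hpcon)
+//	_ = ResizePseudoConsole(hpcon, windows.Coord{X: 120, Y: 40})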
+import (
+	"unsafe"
+
+	"golang.org/x/sys/windows"
+)
+
+const PSEUDOCONSOLE_INHERIT_CURSOR = 0x1
+
+// CreatePseudoConsole creates a Windows pseudo console.
+func CreatePseudoConsole(size windows.Coord, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) error {
+	// We need this wrapper as the function takes a COORD struct and not a pointer to one, so we need to cast to something beforehand.
+	return createPseudoConsole(*((*uint32)(unsafe.Pointer(&size))), hInput, hOutput, dwFlags, hpcon)
+}
+
+// ResizePseudoConsole resizes the internal buffers of the pseudo console to the width and height specified in `size`.
+func ResizePseudoConsole(hpcon windows.Handle, size windows.Coord) error {
+	// We need this wrapper as the function takes a COORD struct and not a pointer to one, so we need to cast to something beforehand.
+	return resizePseudoConsole(hpcon, *((*uint32)(unsafe.Pointer(&size))))
+}
+
+// HRESULT WINAPI CreatePseudoConsole(
+//     _In_ COORD size,
+//     _In_ HANDLE hInput,
+//     _In_ HANDLE hOutput,
+//     _In_ DWORD dwFlags,
+//     _Out_ HPCON* phPC
+// );
+//
+//sys createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) (hr error) = kernel32.CreatePseudoConsole
+
+// void WINAPI ClosePseudoConsole(
+//     _In_ HPCON hPC
+// );
+//
+//sys ClosePseudoConsole(hpc windows.Handle) = kernel32.ClosePseudoConsole
+
+// HRESULT WINAPI ResizePseudoConsole(
+//     _In_ HPCON hPC ,
+//     _In_ COORD size
+// );
+//
+//sys resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go
deleted file mode 100644
index 4e609cbf1..000000000
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package winapi
-
-//sys GetQueuedCompletionStatus(cphandle windows.Handle, qty *uint32, key *uintptr, overlapped **windows.Overlapped, timeout uint32) (err error)
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go
index ba12b1ad9..7eb13f8f0 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go
@@ -24,7 +24,10 @@ const (
 // Access rights for creating or opening job objects.
 //
 // https://docs.microsoft.com/en-us/windows/win32/procthread/job-object-security-and-access-rights
-const JOB_OBJECT_ALL_ACCESS = 0x1F001F
+const (
+	JOB_OBJECT_QUERY      = 0x0004
+	JOB_OBJECT_ALL_ACCESS = 0x1F001F
+)
 
 // IO limit flags
 //
@@ -93,7 +96,7 @@ type JOBOBJECT_BASIC_PROCESS_ID_LIST struct {
 
 // AllPids returns all the process Ids in the job object.
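+// The three-index slice expression below also caps the returned slice's
+// capacity at the reported count, so an append by a caller reallocates instead
+// of scribbling past the portion of the buffer the kernel actually filled in.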
func (p *JOBOBJECT_BASIC_PROCESS_ID_LIST) AllPids() []uintptr { - return (*[(1 << 27) - 1]uintptr)(unsafe.Pointer(&p.ProcessIdList[0]))[:p.NumberOfProcessIdsInList] + return (*[(1 << 27) - 1]uintptr)(unsafe.Pointer(&p.ProcessIdList[0]))[:p.NumberOfProcessIdsInList:p.NumberOfProcessIdsInList] } // https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_accounting_information @@ -162,7 +165,7 @@ type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct { // PBOOL Result // ); // -//sys IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *bool) (err error) = kernel32.IsProcessInJob +//sys IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *int32) (err error) = kernel32.IsProcessInJob // BOOL QueryInformationJobObject( // HANDLE hJob, @@ -172,7 +175,7 @@ type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct { // LPDWORD lpReturnLength // ); // -//sys QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo uintptr, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) = kernel32.QueryInformationJobObject +//sys QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo unsafe.Pointer, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) = kernel32.QueryInformationJobObject // HANDLE OpenJobObjectW( // DWORD dwDesiredAccess, diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go index 83f704064..53f62948c 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go @@ -1,27 +1,4 @@ package winapi -// VOID RtlMoveMemory( -// _Out_ VOID UNALIGNED *Destination, -// _In_ const VOID UNALIGNED *Source, -// _In_ SIZE_T Length -// ); -//sys RtlMoveMemory(destination *byte, source *byte, length uintptr) (err error) = kernel32.RtlMoveMemory - //sys LocalAlloc(flags uint32, size int) (ptr uintptr) = kernel32.LocalAlloc //sys LocalFree(ptr uintptr) = kernel32.LocalFree - -// BOOL QueryWorkingSet( -// HANDLE hProcess, -// PVOID pv, -// DWORD cb -// ); -//sys QueryWorkingSet(handle windows.Handle, pv uintptr, cb uint32) (err error) = psapi.QueryWorkingSet - -type PSAPI_WORKING_SET_INFORMATION struct { - NumberOfEntries uintptr - WorkingSetInfo [1]PSAPI_WORKING_SET_BLOCK -} - -type PSAPI_WORKING_SET_BLOCK struct { - Flags uintptr -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go index b87068327..222529f43 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go @@ -2,9 +2,64 @@ package winapi const PROCESS_ALL_ACCESS uint32 = 2097151 -// DWORD GetProcessImageFileNameW( -// HANDLE hProcess, -// LPWSTR lpImageFileName, -// DWORD nSize +const ( + PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE = 0x20016 + PROC_THREAD_ATTRIBUTE_JOB_LIST = 0x2000D +) + +// ProcessVmCounters corresponds to the _VM_COUNTERS_EX and _VM_COUNTERS_EX2 structures. 
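+// It is the PROCESSINFOCLASS value passed to NtQueryInformationProcess below
+// when retrieving either of those structures.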
+const ProcessVmCounters = 3 + +// __kernel_entry NTSTATUS NtQueryInformationProcess( +// [in] HANDLE ProcessHandle, +// [in] PROCESSINFOCLASS ProcessInformationClass, +// [out] PVOID ProcessInformation, +// [in] ULONG ProcessInformationLength, +// [out, optional] PULONG ReturnLength // ); -//sys GetProcessImageFileName(hProcess windows.Handle, imageFileName *uint16, nSize uint32) (size uint32, err error) = kernel32.GetProcessImageFileNameW +// +//sys NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo unsafe.Pointer, processInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQueryInformationProcess + +// typedef struct _VM_COUNTERS_EX +// { +// SIZE_T PeakVirtualSize; +// SIZE_T VirtualSize; +// ULONG PageFaultCount; +// SIZE_T PeakWorkingSetSize; +// SIZE_T WorkingSetSize; +// SIZE_T QuotaPeakPagedPoolUsage; +// SIZE_T QuotaPagedPoolUsage; +// SIZE_T QuotaPeakNonPagedPoolUsage; +// SIZE_T QuotaNonPagedPoolUsage; +// SIZE_T PagefileUsage; +// SIZE_T PeakPagefileUsage; +// SIZE_T PrivateUsage; +// } VM_COUNTERS_EX, *PVM_COUNTERS_EX; +// +type VM_COUNTERS_EX struct { + PeakVirtualSize uintptr + VirtualSize uintptr + PageFaultCount uint32 + PeakWorkingSetSize uintptr + WorkingSetSize uintptr + QuotaPeakPagedPoolUsage uintptr + QuotaPagedPoolUsage uintptr + QuotaPeakNonPagedPoolUsage uintptr + QuotaNonPagedPoolUsage uintptr + PagefileUsage uintptr + PeakPagefileUsage uintptr + PrivateUsage uintptr +} + +// typedef struct _VM_COUNTERS_EX2 +// { +// VM_COUNTERS_EX CountersEx; +// SIZE_T PrivateWorkingSetSize; +// SIZE_T SharedCommitUsage; +// } VM_COUNTERS_EX2, *PVM_COUNTERS_EX2; +// +type VM_COUNTERS_EX2 struct { + CountersEx VM_COUNTERS_EX + PrivateWorkingSetSize uintptr + SharedCommitUsage uintptr +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go index 327f57d7c..78fe01a4b 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go @@ -12,7 +12,8 @@ const STATUS_INFO_LENGTH_MISMATCH = 0xC0000004 // ULONG SystemInformationLength, // PULONG ReturnLength // ); -//sys NtQuerySystemInformation(systemInfoClass int, systemInformation uintptr, systemInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQuerySystemInformation +// +//sys NtQuerySystemInformation(systemInfoClass int, systemInformation unsafe.Pointer, systemInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQuerySystemInformation type SYSTEM_PROCESS_INFORMATION struct { NextEntryOffset uint32 // ULONG diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go index db59567d0..859b753c2 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go @@ -20,36 +20,41 @@ func Uint16BufferToSlice(buffer *uint16, bufferLength int) (result []uint16) { return } +// UnicodeString corresponds to UNICODE_STRING win32 struct defined here +// https://docs.microsoft.com/en-us/windows/win32/api/ntdef/ns-ntdef-_unicode_string type UnicodeString struct { Length uint16 MaximumLength uint16 Buffer *uint16 } +// NTSTRSAFE_UNICODE_STRING_MAX_CCH is a constant defined in ntstrsafe.h. This value +// denotes the maximum number of wide chars a path can have. 
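+// (A UNICODE_STRING stores its length as a uint16 byte count, and 65535/2
+// rounds down to 32767 UTF-16 units.)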
+const NTSTRSAFE_UNICODE_STRING_MAX_CCH = 32767 + //String converts a UnicodeString to a golang string func (uni UnicodeString) String() string { // UnicodeString is not guaranteed to be null terminated, therefore // use the UnicodeString's Length field - return syscall.UTF16ToString(Uint16BufferToSlice(uni.Buffer, int(uni.Length/2))) + return windows.UTF16ToString(Uint16BufferToSlice(uni.Buffer, int(uni.Length/2))) } // NewUnicodeString allocates a new UnicodeString and copies `s` into // the buffer of the new UnicodeString. func NewUnicodeString(s string) (*UnicodeString, error) { - // Get length of original `s` to use in the UnicodeString since the `buf` - // created later will have an additional trailing null character - length := len(s) - if length > 32767 { - return nil, syscall.ENAMETOOLONG - } - buf, err := windows.UTF16FromString(s) if err != nil { return nil, err } + + if len(buf) > NTSTRSAFE_UNICODE_STRING_MAX_CCH { + return nil, syscall.ENAMETOOLONG + } + uni := &UnicodeString{ - Length: uint16(length * 2), - MaximumLength: uint16(length * 2), + // The length is in bytes and should not include the trailing null character. + Length: uint16((len(buf) - 1) * 2), + MaximumLength: uint16((len(buf) - 1) * 2), Buffer: &buf[0], } return uni, nil diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go index ec88c0d21..d2cc9d9fb 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go @@ -2,4 +2,4 @@ // be thought of as an extension to golang.org/x/sys/windows. package winapi -//go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go system.go net.go path.go thread.go iocp.go jobobject.go logon.go memory.go process.go processor.go devices.go filesystem.go errors.go +//go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go user.go console.go system.go net.go path.go thread.go jobobject.go logon.go memory.go process.go processor.go devices.go filesystem.go errors.go diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go index 2941b0f98..1f16cf0b8 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go @@ -37,18 +37,19 @@ func errnoErr(e syscall.Errno) error { } var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") modntdll = windows.NewLazySystemDLL("ntdll.dll") modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll") - modkernel32 = windows.NewLazySystemDLL("kernel32.dll") modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") - modpsapi = windows.NewLazySystemDLL("psapi.dll") modcfgmgr32 = windows.NewLazySystemDLL("cfgmgr32.dll") + procCreatePseudoConsole = modkernel32.NewProc("CreatePseudoConsole") + procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole") + procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole") procNtQuerySystemInformation = modntdll.NewProc("NtQuerySystemInformation") procSetJobCompartmentId = modiphlpapi.NewProc("SetJobCompartmentId") procSearchPathW = modkernel32.NewProc("SearchPathW") procCreateRemoteThread = modkernel32.NewProc("CreateRemoteThread") - procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") procIsProcessInJob = modkernel32.NewProc("IsProcessInJob") procQueryInformationJobObject = 
modkernel32.NewProc("QueryInformationJobObject") procOpenJobObjectW = modkernel32.NewProc("OpenJobObjectW") @@ -57,11 +58,9 @@ var ( procNtOpenJobObject = modntdll.NewProc("NtOpenJobObject") procNtCreateJobObject = modntdll.NewProc("NtCreateJobObject") procLogonUserW = modadvapi32.NewProc("LogonUserW") - procRtlMoveMemory = modkernel32.NewProc("RtlMoveMemory") procLocalAlloc = modkernel32.NewProc("LocalAlloc") procLocalFree = modkernel32.NewProc("LocalFree") - procQueryWorkingSet = modpsapi.NewProc("QueryWorkingSet") - procGetProcessImageFileNameW = modkernel32.NewProc("GetProcessImageFileNameW") + procNtQueryInformationProcess = modntdll.NewProc("NtQueryInformationProcess") procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount") procCM_Get_Device_ID_List_SizeA = modcfgmgr32.NewProc("CM_Get_Device_ID_List_SizeA") procCM_Get_Device_ID_ListA = modcfgmgr32.NewProc("CM_Get_Device_ID_ListA") @@ -74,7 +73,34 @@ var ( procRtlNtStatusToDosError = modntdll.NewProc("RtlNtStatusToDosError") ) -func NtQuerySystemInformation(systemInfoClass int, systemInformation uintptr, systemInfoLength uint32, returnLength *uint32) (status uint32) { +func createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) (hr error) { + r0, _, _ := syscall.Syscall6(procCreatePseudoConsole.Addr(), 5, uintptr(size), uintptr(hInput), uintptr(hOutput), uintptr(dwFlags), uintptr(unsafe.Pointer(hpcon)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func ClosePseudoConsole(hpc windows.Handle) { + syscall.Syscall(procClosePseudoConsole.Addr(), 1, uintptr(hpc), 0, 0) + return +} + +func resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) { + r0, _, _ := syscall.Syscall(procResizePseudoConsole.Addr(), 2, uintptr(hPc), uintptr(size), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func NtQuerySystemInformation(systemInfoClass int, systemInformation unsafe.Pointer, systemInfoLength uint32, returnLength *uint32) (status uint32) { r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(systemInfoClass), uintptr(systemInformation), uintptr(systemInfoLength), uintptr(unsafe.Pointer(returnLength)), 0, 0) status = uint32(r0) return @@ -114,19 +140,7 @@ func CreateRemoteThread(process windows.Handle, sa *windows.SecurityAttributes, return } -func GetQueuedCompletionStatus(cphandle windows.Handle, qty *uint32, key *uintptr, overlapped **windows.Overlapped, timeout uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *bool) (err error) { +func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *int32) (err error) { r1, _, e1 := syscall.Syscall(procIsProcessInJob.Addr(), 3, uintptr(procHandle), uintptr(jobHandle), uintptr(unsafe.Pointer(result))) if r1 == 0 { if e1 != 0 { @@ -138,7 +152,7 @@ func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result return } -func QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo uintptr, jobObjectInformationLength 
uint32, lpReturnLength *uint32) (err error) { +func QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo unsafe.Pointer, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) { r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(jobHandle), uintptr(infoClass), uintptr(jobObjectInfo), uintptr(jobObjectInformationLength), uintptr(unsafe.Pointer(lpReturnLength)), 0) if r1 == 0 { if e1 != 0 { @@ -219,18 +233,6 @@ func LogonUser(username *uint16, domain *uint16, password *uint16, logonType uin return } -func RtlMoveMemory(destination *byte, source *byte, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procRtlMoveMemory.Addr(), 3, uintptr(unsafe.Pointer(destination)), uintptr(unsafe.Pointer(source)), uintptr(length)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - func LocalAlloc(flags uint32, size int) (ptr uintptr) { r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(size), 0) ptr = uintptr(r0) @@ -242,28 +244,9 @@ func LocalFree(ptr uintptr) { return } -func QueryWorkingSet(handle windows.Handle, pv uintptr, cb uint32) (err error) { - r1, _, e1 := syscall.Syscall(procQueryWorkingSet.Addr(), 3, uintptr(handle), uintptr(pv), uintptr(cb)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetProcessImageFileName(hProcess windows.Handle, imageFileName *uint16, nSize uint32) (size uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetProcessImageFileNameW.Addr(), 3, uintptr(hProcess), uintptr(unsafe.Pointer(imageFileName)), uintptr(nSize)) - size = uint32(r0) - if size == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo unsafe.Pointer, processInfoLength uint32, returnLength *uint32) (status uint32) { + r0, _, _ := syscall.Syscall6(procNtQueryInformationProcess.Addr(), 5, uintptr(processHandle), uintptr(processInfoClass), uintptr(processInfo), uintptr(processInfoLength), uintptr(unsafe.Pointer(returnLength)), 0) + status = uint32(r0) return } diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go index e9267b955..75dce5d82 100644 --- a/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go +++ b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go @@ -35,4 +35,16 @@ const ( // V20H2 corresponds to Windows Server 20H2 (semi-annual channel). V20H2 = 19042 + + // V21H1 corresponds to Windows Server 21H1 (semi-annual channel). + V21H1 = 19043 + + // V21H2Win10 corresponds to Windows 10 (November 2021 Update). + V21H2Win10 = 19044 + + // V21H2Server corresponds to Windows Server 2022 (ltsc2022). + V21H2Server = 20348 + + // V21H2Win11 corresponds to Windows 11 (original release). 
+ V21H2Win11 = 22000 ) diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml index 96714c2fc..6f9048564 100644 --- a/vendor/github.com/containers/storage/.cirrus.yml +++ b/vendor/github.com/containers/storage/.cirrus.yml @@ -17,19 +17,15 @@ env: #### #### Cache-image names to test with (double-quotes around names are critical) ### - FEDORA_NAME: "fedora-34" - PRIOR_FEDORA_NAME: "fedora-33" - UBUNTU_NAME: "ubuntu-2104" - PRIOR_UBUNTU_NAME: "ubuntu-2010" + FEDORA_NAME: "fedora-36" + UBUNTU_NAME: "ubuntu-2204" # GCE project where images live IMAGE_PROJECT: "libpod-218412" # VM Image built in containers/automation_images - _BUILT_IMAGE_SUFFIX: "c6032583541653504" - FEDORA_CACHE_IMAGE_NAME: "fedora-${_BUILT_IMAGE_SUFFIX}" - PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${_BUILT_IMAGE_SUFFIX}" - UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${_BUILT_IMAGE_SUFFIX}" - PRIOR_UBUNTU_CACHE_IMAGE_NAME: "prior-ubuntu-${_BUILT_IMAGE_SUFFIX}" + IMAGE_SUFFIX: "c5878804328480768" + FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}" + UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX}" #### #### Command variables to help avoid duplication @@ -76,6 +72,8 @@ fedora_testing_task: &fedora_testing TEST_DRIVER: "vfs" - env: TEST_DRIVER: "overlay" + - env: + TEST_DRIVER: "overlay-transient" - env: TEST_DRIVER: "fuse-overlay" - env: @@ -92,15 +90,6 @@ fedora_testing_task: &fedora_testing journal_log_script: '${_JOURNALCMD} || true' -prior_fedora_testing_task: - <<: *fedora_testing - alias: prior_fedora_testing - name: *std_test_name - env: - OS_NAME: "${PRIOR_FEDORA_NAME}" - VM_IMAGE: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}" - - ubuntu_testing_task: &ubuntu_testing <<: *fedora_testing alias: ubuntu_testing @@ -115,20 +104,11 @@ ubuntu_testing_task: &ubuntu_testing TEST_DRIVER: "overlay" -prior_ubuntu_testing_task: - <<: *ubuntu_testing - alias: prior_ubuntu_testing - name: *std_test_name - env: - OS_NAME: "${PRIOR_UBUNTU_NAME}" - VM_IMAGE: "${PRIOR_UBUNTU_CACHE_IMAGE_NAME}" - - lint_task: env: CIRRUS_WORKING_DIR: "/go/src/github.com/containers/storage" container: - image: golang:1.15 + image: golang:1.17 modules_cache: fingerprint_script: cat go.sum folder: $GOPATH/pkg/mod @@ -136,14 +116,14 @@ lint_task: echo "deb http://deb.debian.org/debian stretch-backports main" > /etc/apt/sources.list.d/backports.list apt-get update apt-get install -y libbtrfs-dev libdevmapper-dev - test_script: make lint + test_script: make TAGS=regex_precompile local-validate && make lint && make clean # Update metadata on VM images referenced by this repository state meta_task: container: - image: "quay.io/libpod/imgts:master" + image: "quay.io/libpod/imgts:latest" cpu: 1 memory: 1 @@ -151,9 +131,7 @@ meta_task: # Space-separated list of images used by this repository state IMGNAMES: |- ${FEDORA_CACHE_IMAGE_NAME} - ${PRIOR_FEDORA_CACHE_IMAGE_NAME} ${UBUNTU_CACHE_IMAGE_NAME} - ${PRIOR_UBUNTU_CACHE_IMAGE_NAME} BUILDID: "${CIRRUS_BUILD_ID}" REPOREF: "${CIRRUS_CHANGE_IN_REPO}" GCPJSON: ENCRYPTED[244a93fe8b386b48b96f748342bf741350e43805eee81dd04b45093bdf737e540b993fc735df41f131835fa0f9b65826] @@ -166,7 +144,7 @@ meta_task: vendor_task: container: - image: golang:1.15 + image: golang:1.17 modules_cache: fingerprint_script: cat go.sum folder: $GOPATH/pkg/mod @@ -179,12 +157,10 @@ success_task: depends_on: - lint - fedora_testing - - prior_fedora_testing - ubuntu_testing - - prior_ubuntu_testing - meta - vendor container: - image: golang:1.15 + image: golang:1.17 clone_script: 'mkdir -p "$CIRRUS_WORKING_DIR"' # 
Source code not needed script: /bin/true diff --git a/vendor/github.com/containers/storage/CODE-OF-CONDUCT.md b/vendor/github.com/containers/storage/CODE-OF-CONDUCT.md index be0791620..f4f7df4b8 100644 --- a/vendor/github.com/containers/storage/CODE-OF-CONDUCT.md +++ b/vendor/github.com/containers/storage/CODE-OF-CONDUCT.md @@ -1,3 +1,3 @@ ## The Containers Storage Project Community Code of Conduct -The Containers Storage project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/master/CODE-OF-CONDUCT.md). +The Containers Storage project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/main/CODE-OF-CONDUCT.md). diff --git a/vendor/github.com/containers/storage/Makefile b/vendor/github.com/containers/storage/Makefile index 581961fed..ea2bb6406 100644 --- a/vendor/github.com/containers/storage/Makefile +++ b/vendor/github.com/containers/storage/Makefile @@ -29,7 +29,7 @@ GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g") EPOCH_TEST_COMMIT := 0418ebf59f9e1f564831c0ba9378b7f8e40a1c73 NATIVETAGS := -AUTOTAGS := $(shell ./hack/btrfs_tag.sh) $(shell ./hack/libdm_tag.sh) +AUTOTAGS := $(shell ./hack/btrfs_tag.sh) $(shell ./hack/libdm_tag.sh) $(shell ./hack/libsubid_tag.sh) BUILDFLAGS := -tags "$(AUTOTAGS) $(TAGS)" $(FLAGS) GO ?= go TESTFLAGS := $(shell go test -race $(BUILDFLAGS) ./pkg/stringutils 2>&1 > /dev/null && echo -race) @@ -51,13 +51,16 @@ sources := $(wildcard *.go cmd/containers-storage/*.go drivers/*.go drivers/*/*. containers-storage: $(sources) ## build using gc on the host $(GO) build $(MOD_VENDOR) -compiler gc $(BUILDFLAGS) ./cmd/containers-storage +codespell: + codespell -S Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L flate,uint,iff,od,ERRO -w + binary local-binary: containers-storage local-gccgo: ## build using gccgo on the host GCCGO=$(PWD)/hack/gccgo-wrapper.sh $(GO) build $(MOD_VENDOR) -compiler gccgo $(BUILDFLAGS) -o containers-storage.gccgo ./cmd/containers-storage -local-cross: ## cross build the binaries for arm, darwin, and\nfreebsd - @for target in linux/amd64 linux/386 linux/arm linux/arm64 linux/ppc64 linux/ppc64le darwin/amd64 windows/amd64 ; do \ +local-cross: ## cross build the binaries for arm, darwin, and freebsd + @for target in linux/amd64 linux/386 linux/arm linux/arm64 linux/ppc64 linux/ppc64le linux/s390x linux/mips linux/mipsle linux/mips64 linux/mips64le darwin/amd64 windows/amd64 freebsd/amd64 freebsd/arm64 ; do \ os=`echo $${target} | cut -f1 -d/` ; \ arch=`echo $${target} | cut -f2 -d/` ; \ suffix=$${os}.$${arch} ; \ @@ -66,57 +69,57 @@ local-cross: ## cross build the binaries for arm, darwin, and\nfreebsd done cross: ## cross build the binaries for arm, darwin, and\nfreebsd using VMs - $(RUNINVM) make local-$@ + $(RUNINVM) $(MAKE) local-$@ docs: install.tools ## build the docs on the host $(MAKE) -C docs docs gccgo: ## build using gccgo using VMs - $(RUNINVM) make local-$@ + $(RUNINVM) $(MAKE) local-$@ test: local-binary ## build the binaries and run the tests using VMs - $(RUNINVM) make local-binary local-cross local-test-unit local-test-integration + $(RUNINVM) $(MAKE) local-binary local-cross local-test-unit local-test-integration local-test-unit: local-binary ## run the unit tests on the host (requires\nsuperuser privileges) 
@$(GO) test $(MOD_VENDOR) $(BUILDFLAGS) $(TESTFLAGS) $(shell $(GO) list ./... | grep -v ^$(PACKAGE)/vendor) test-unit: local-binary ## run the unit tests using VMs - $(RUNINVM) make local-$@ + $(RUNINVM) $(MAKE) local-$@ local-test-integration: local-binary ## run the integration tests on the host (requires\nsuperuser privileges) @cd tests; ./test_runner.bash test-integration: local-binary ## run the integration tests using VMs - $(RUNINVM) make local-$@ + $(RUNINVM) $(MAKE) local-$@ -local-validate: ## validate DCO and gofmt on the host +local-validate: install.tools ## validate DCO and gofmt on the host @./hack/git-validation.sh @./hack/gofmt.sh validate: ## validate DCO, gofmt, ./pkg/ isolation, golint,\ngo vet and vendor using VMs - $(RUNINVM) make local-$@ + $(RUNINVM) $(MAKE) local-$@ install.tools: - make -C tests/tools + $(MAKE) -C tests/tools $(FFJSON): - make -C tests/tools + $(MAKE) -C tests/tools install.docs: docs - make -C docs install + $(MAKE) -C docs install install: install.docs lint: install.tools - tests/tools/build/golangci-lint run + tests/tools/build/golangci-lint run --build-tags="$(AUTOTAGS) $(TAGS)" help: ## this help @awk 'BEGIN {FS = ":.*?## "} /^[a-z A-Z_-]+:.*?## / {gsub(" ",",",$$1);gsub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-21s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) vendor-in-container: - podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src golang make vendor + podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src golang:1.17 make vendor vendor: - $(GO) mod tidy + $(GO) mod tidy -compat=1.17 $(GO) mod vendor $(GO) mod verify diff --git a/vendor/github.com/containers/storage/SECURITY.md b/vendor/github.com/containers/storage/SECURITY.md index 1496a4c00..ab2c14182 100644 --- a/vendor/github.com/containers/storage/SECURITY.md +++ b/vendor/github.com/containers/storage/SECURITY.md @@ -1,3 +1,3 @@ ## Security and Disclosure Information Policy for the Containers Storage Project -The Containers Storage Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/master/SECURITY.md) for the Containers Projects. +The Containers Storage Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/main/SECURITY.md) for the Containers Projects. diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION index 96cd6ee1e..53999456e 100644 --- a/vendor/github.com/containers/storage/VERSION +++ b/vendor/github.com/containers/storage/VERSION @@ -1 +1 @@ -1.32.1 +1.45.3 diff --git a/vendor/github.com/containers/storage/Vagrantfile b/vendor/github.com/containers/storage/Vagrantfile deleted file mode 100644 index c82c1f81b..000000000 --- a/vendor/github.com/containers/storage/Vagrantfile +++ /dev/null @@ -1,25 +0,0 @@ -# -*- mode: ruby -*- -# vi: set ft=ruby : -# -# The fedora/28-cloud-base and debian/jessie64 boxes are also available for -# the "virtualbox" provider. Set the VAGRANT_PROVIDER environment variable to -# "virtualbox" to use them instead. 
-# -Vagrant.configure("2") do |config| - config.vm.define "fedora" do |c| - c.vm.box = "fedora/28-cloud-base" - c.vm.synced_folder ".", "/vagrant", type: "rsync", - rsync__exclude: "bundles", rsync__args: ["-vadz", "--delete"] - c.vm.provision "shell", inline: <<-SHELL - sudo /vagrant/vagrant/provision.sh - SHELL - end - config.vm.define "debian" do |c| - c.vm.box = "debian/jessie64" - c.vm.synced_folder ".", "/vagrant", type: "rsync", - rsync__exclude: "bundles", rsync__args: ["-vadz", "--delete"] - c.vm.provision "shell", inline: <<-SHELL - sudo /vagrant/vagrant/provision.sh - SHELL - end -end diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go index b4f773f2b..106d1d152 100644 --- a/vendor/github.com/containers/storage/containers.go +++ b/vendor/github.com/containers/storage/containers.go @@ -1,8 +1,8 @@ package storage import ( + "errors" "fmt" - "io/ioutil" "os" "path/filepath" "sync" @@ -10,12 +10,28 @@ import ( "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/lockfile" "github.com/containers/storage/pkg/stringid" "github.com/containers/storage/pkg/truncindex" digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" ) +type containerLocations uint8 + +// The backing store is split in two json files, one (the volatile) +// that is written without fsync() meaning it isn't as robust to +// unclean shutdown +const ( + stableContainerLocation containerLocations = 1 << iota + volatileContainerLocation + + numContainerLocationIndex = iota +) + +func containerLocationFromIndex(index int) containerLocations { + return 1 << index +} + // A Container is a reference to a read-write layer with metadata. type Container struct { // ID is either one which was specified at create-time, or a random @@ -65,14 +81,30 @@ type Container struct { GIDMap []idtools.IDMap `json:"gidmap,omitempty"` Flags map[string]interface{} `json:"flags,omitempty"` + + // volatileStore is true if the container is from the volatile json file + volatileStore bool `json:"-"` } -// ContainerStore provides bookkeeping for information about Containers. -type ContainerStore interface { - FileBasedStore - MetadataStore - ContainerBigDataStore - FlaggableStore +// rwContainerStore provides bookkeeping for information about Containers. +type rwContainerStore interface { + metadataStore + containerBigDataStore + flaggableStore + + // startWriting makes sure the store is fresh, and locks it for writing. + // If this succeeds, the caller MUST call stopWriting(). + startWriting() error + + // stopWriting releases locks obtained by startWriting. + stopWriting() + + // startReading makes sure the store is fresh, and locks it for reading. + // If this succeeds, the caller MUST call stopReading(). + startReading() error + + // stopReading releases locks obtained by startReading. + stopReading() // Create creates a container that has a specified ID (or generates a // random one if an empty value is supplied) and optional names, @@ -82,9 +114,8 @@ type ContainerStore interface { // convenience of the caller, nothing more. Create(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) - // SetNames updates the list of names associated with the container - // with the specified ID. - SetNames(id string, names []string) error + // updateNames modifies names associated with a container based on (op, names). 
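+	// op selects whether the given names are set (replacing any existing
+	// ones), added, or removed.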
+ updateNames(id string, names []string, op updateNameOperation) error // Get retrieves information about a container given an ID or name. Get(id string) (*Container, error) @@ -104,17 +135,27 @@ type ContainerStore interface { // Containers returns a slice enumerating the known containers. Containers() ([]Container, error) + + // Clean up unreferenced datadirs + GarbageCollect() error } type containerStore struct { - lockfile Locker - dir string + // The following fields are only set when constructing containerStore, and must never be modified afterwards. + // They are safe to access without any other locking. + lockfile *lockfile.LockFile // Synchronizes readers vs. writers of the _filesystem data_, both cross-process and in-process. + dir string + jsonPath [numContainerLocationIndex]string + + inProcessLock sync.RWMutex // Can _only_ be obtained with lockfile held. + // The following fields can only be read/written with read/write ownership of inProcessLock, respectively. + // Almost all users should use startReading() or startWriting(). + lastWrite lockfile.LastWrite containers []*Container idindex *truncindex.TruncIndex byid map[string]*Container bylayer map[string]*Container byname map[string]*Container - loadMut sync.Mutex } func copyContainer(c *Container) *Container { @@ -131,30 +172,31 @@ func copyContainer(c *Container) *Container { UIDMap: copyIDMap(c.UIDMap), GIDMap: copyIDMap(c.GIDMap), Flags: copyStringInterfaceMap(c.Flags), + volatileStore: c.volatileStore, } } func (c *Container) MountLabel() string { - if label, ok := c.Flags["MountLabel"].(string); ok { + if label, ok := c.Flags[mountLabelFlag].(string); ok { return label } return "" } func (c *Container) ProcessLabel() string { - if label, ok := c.Flags["ProcessLabel"].(string); ok { + if label, ok := c.Flags[processLabelFlag].(string); ok { return label } return "" } func (c *Container) MountOpts() []string { - switch c.Flags["MountOpts"].(type) { + switch value := c.Flags[mountOptsFlag].(type) { case []string: - return c.Flags["MountOpts"].([]string) + return value case []interface{}: var mountOpts []string - for _, v := range c.Flags["MountOpts"].([]interface{}) { + for _, v := range value { if flag, ok := v.(string); ok { mountOpts = append(mountOpts, flag) } @@ -165,6 +207,187 @@ func (c *Container) MountOpts() []string { } } +// The caller must hold r.inProcessLock for reading. +func containerLocation(c *Container) containerLocations { + if c.volatileStore { + return volatileContainerLocation + } + return stableContainerLocation +} + +// startWritingWithReload makes sure the store is fresh if canReload, and locks it for writing. +// If this succeeds, the caller MUST call stopWriting(). +// +// This is an internal implementation detail of containerStore construction, every other caller +// should use startWriting() instead. +func (r *containerStore) startWritingWithReload(canReload bool) error { + r.lockfile.Lock() + r.inProcessLock.Lock() + succeeded := false + defer func() { + if !succeeded { + r.inProcessLock.Unlock() + r.lockfile.Unlock() + } + }() + + if canReload { + if _, err := r.reloadIfChanged(true); err != nil { + return err + } + } + + succeeded = true + return nil +} + +// startWriting makes sure the store is fresh, and locks it for writing. +// If this succeeds, the caller MUST call stopWriting(). +func (r *containerStore) startWriting() error { + return r.startWritingWithReload(true) +} + +// stopWriting releases locks obtained by startWriting. 
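+// It must be called exactly once for every successful startWriting.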
+func (r *containerStore) stopWriting() { + r.inProcessLock.Unlock() + r.lockfile.Unlock() +} + +// startReading makes sure the store is fresh, and locks it for reading. +// If this succeeds, the caller MUST call stopReading(). +func (r *containerStore) startReading() error { + // inProcessLocked calls the nested function with r.inProcessLock held for writing. + inProcessLocked := func(fn func() error) error { + r.inProcessLock.Lock() + defer r.inProcessLock.Unlock() + return fn() + } + + r.lockfile.RLock() + unlockFn := r.lockfile.Unlock // A function to call to clean up, or nil. + defer func() { + if unlockFn != nil { + unlockFn() + } + }() + r.inProcessLock.RLock() + unlockFn = r.stopReading + + // If we are lucky, we can just hold the read locks, check that we are fresh, and continue. + _, modified, err := r.modified() + if err != nil { + return err + } + if modified { + // We are unlucky, and need to reload. + // NOTE: Multiple goroutines can get to this place approximately simultaneously. + r.inProcessLock.RUnlock() + unlockFn = r.lockfile.Unlock + + // r.lastWrite can change at this point if another goroutine reloads the store before us. That’s why we don’t unconditionally + // trigger a load below; we (lock and) reloadIfChanged() again. + + // First try reloading with r.lockfile held for reading. + // r.inProcessLock will serialize all goroutines that got here; + // each will re-check on-disk state vs. r.lastWrite, and the first one will actually reload the data. + var tryLockedForWriting bool + if err := inProcessLocked(func() error { + // We could optimize this further: The r.lockfile.GetLastWrite() value shouldn’t change as long as we hold r.lockfile, + // so if r.lastWrite was already updated, we don’t need to actually read the on-filesystem lock. + var err error + tryLockedForWriting, err = r.reloadIfChanged(false) + return err + }); err != nil { + if !tryLockedForWriting { + return err + } + // Not good enough, we need r.lockfile held for writing. So, let’s do that. + unlockFn() + unlockFn = nil + + r.lockfile.Lock() + unlockFn = r.lockfile.Unlock + if err := inProcessLocked(func() error { + _, err := r.reloadIfChanged(true) + return err + }); err != nil { + return err + } + unlockFn() + unlockFn = nil + + r.lockfile.RLock() + unlockFn = r.lockfile.Unlock + // We need to check for a reload once more because the on-disk state could have been modified + // after we released the lock. + // If that, _again_, finds inconsistent state, just give up. + // We could, plausibly, retry a few times, but that inconsistent state (duplicate container names) + // shouldn’t be saved (by correct implementations) in the first place. + if err := inProcessLocked(func() error { + _, err := r.reloadIfChanged(false) + return err + }); err != nil { + return fmt.Errorf("(even after successfully cleaning up once:) %w", err) + } + } + + // NOTE that we hold neither a read nor write inProcessLock at this point. That’s fine in ordinary operation, because + // the on-filesystem r.lockfile should protect us against (cooperating) writers, and any use of r.inProcessLock + // protects us against in-process writers modifying data. + // In presence of non-cooperating writers, we just ensure that 1) the in-memory data is not clearly out-of-date + // and 2) access to the in-memory data is not racy; + // but we can’t protect against those out-of-process writers modifying _files_ while we are assuming they are in a consistent state. 
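+		// (The freshness protocol that makes this safe is spelled out on
+		// modified and reloadIfChanged below.)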
+ + r.inProcessLock.RLock() + } + unlockFn = nil + return nil +} + +// stopReading releases locks obtained by startReading. +func (r *containerStore) stopReading() { + r.inProcessLock.RUnlock() + r.lockfile.Unlock() +} + +// modified returns true if the on-disk state has changed (i.e. if reloadIfChanged may need to modify the store), +// and a lockfile.LastWrite value for that update. +// +// The caller must hold r.lockfile for reading _or_ writing. +// The caller must hold r.inProcessLock for reading or writing. +func (r *containerStore) modified() (lockfile.LastWrite, bool, error) { + return r.lockfile.ModifiedSince(r.lastWrite) +} + +// reloadIfChanged reloads the contents of the store from disk if it is changed. +// +// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true +// if it is held for writing. +// +// The caller must hold r.inProcessLock for WRITING. +// +// If !lockedForWriting and this function fails, the return value indicates whether +// reloadIfChanged() with lockedForWriting could succeed. +func (r *containerStore) reloadIfChanged(lockedForWriting bool) (bool, error) { + lastWrite, modified, err := r.modified() + if err != nil { + return false, err + } + // We require callers to always hold r.inProcessLock for WRITING, even if they might not end up calling r.load() + // and modify no fields, to ensure they see fresh data: + // r.lockfile.Modified() only returns true once per change. Without an exclusive lock, + // one goroutine might see r.lockfile.Modified() == true and decide to load, and in the meanwhile another one could + // see r.lockfile.Modified() == false and proceed to use in-memory data without noticing it is stale. + if modified { + if tryLockedForWriting, err := r.load(lockedForWriting); err != nil { + return tryLockedForWriting, err // r.lastWrite is unchanged, so we will load the next time again. + } + r.lastWrite = lastWrite + } + return false, nil +} + +// Requires startReading or startWriting. func (r *containerStore) Containers() ([]Container, error) { containers := make([]Container, len(r.containers)) for i := range r.containers { @@ -173,8 +396,38 @@ func (r *containerStore) Containers() ([]Container, error) { return containers, nil } -func (r *containerStore) containerspath() string { - return filepath.Join(r.dir, "containers.json") +// This looks for datadirs in the store directory that are not referenced +// by the json file and removes it. These can happen in the case of unclean +// shutdowns or regular restarts in transient store mode. +// Requires startReading. +func (r *containerStore) GarbageCollect() error { + entries, err := os.ReadDir(r.dir) + if err != nil { + // Unexpected, don't try any GC + return err + } + + for _, entry := range entries { + id := entry.Name() + // Does it look like a datadir directory? + if !entry.IsDir() || !nameLooksLikeID(id) { + continue + } + + // Should the id be there? 
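+		// (i.e. is this datadir still referenced from the loaded containers state?)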
+ if r.byid[id] != nil { + continue + } + + // Otherwise remove datadir + moreErr := os.RemoveAll(filepath.Join(r.dir, id)) + // Propagate first error + if moreErr != nil && err == nil { + err = moreErr + } + } + + return err } func (r *containerStore) datadir(id string) string { @@ -185,84 +438,178 @@ func (r *containerStore) datapath(id, key string) string { return filepath.Join(r.datadir(id), makeBigDataBaseName(key)) } -func (r *containerStore) Load() error { - needSave := false - rpath := r.containerspath() - data, err := ioutil.ReadFile(rpath) - if err != nil && !os.IsNotExist(err) { - return err - } +// load reloads the contents of the store from disk. +// +// Most callers should call reloadIfChanged() instead, to avoid overhead and to correctly +// manage r.lastWrite. +// +// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true +// if it is held for writing. +// The caller must hold r.inProcessLock for WRITING. +// +// If !lockedForWriting and this function fails, the return value indicates whether +// retrying with lockedForWriting could succeed. +func (r *containerStore) load(lockedForWriting bool) (bool, error) { + var modifiedLocations containerLocations containers := []*Container{} - layers := make(map[string]*Container) - idlist := []string{} + ids := make(map[string]*Container) + + for locationIndex := 0; locationIndex < numContainerLocationIndex; locationIndex++ { + location := containerLocationFromIndex(locationIndex) + rpath := r.jsonPath[locationIndex] + + data, err := os.ReadFile(rpath) + if err != nil && !os.IsNotExist(err) { + return false, err + } + + locationContainers := []*Container{} + if len(data) != 0 { + if err := json.Unmarshal(data, &locationContainers); err != nil { + return false, fmt.Errorf("loading %q: %w", rpath, err) + } + } + + for _, container := range locationContainers { + // There should be no duplicated ids between json files, but lets check to be sure + if ids[container.ID] != nil { + continue // skip invalid duplicated container + } + // Remember where the container came from + if location == volatileContainerLocation { + container.volatileStore = true + } + containers = append(containers, container) + ids[container.ID] = container + } + } + + idlist := make([]string, 0, len(containers)) + layers := make(map[string]*Container) names := make(map[string]*Container) - if err = json.Unmarshal(data, &containers); len(data) == 0 || err == nil { - idlist = make([]string, 0, len(containers)) - for n, container := range containers { - idlist = append(idlist, container.ID) - ids[container.ID] = containers[n] - layers[container.LayerID] = containers[n] - for _, name := range container.Names { - if conflict, ok := names[name]; ok { - r.removeName(conflict, name) - needSave = true - } - names[name] = containers[n] + var errorToResolveBySaving error // == nil + for n, container := range containers { + idlist = append(idlist, container.ID) + layers[container.LayerID] = containers[n] + for _, name := range container.Names { + if conflict, ok := names[name]; ok { + r.removeName(conflict, name) + errorToResolveBySaving = errors.New("container store is inconsistent and the current caller does not hold a write lock") + modifiedLocations |= containerLocation(container) } + names[name] = containers[n] } } + r.containers = containers - r.idindex = truncindex.NewTruncIndex(idlist) + r.idindex = truncindex.NewTruncIndex(idlist) // Invalid values in idlist are ignored: they are not a reason to refuse processing the whole store. 
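+	// All lookup maps are rebuilt wholesale from the slice just read, so no
+	// stale entries from a previous load can linger.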
r.byid = ids r.bylayer = layers r.byname = names - if needSave { - return r.Save() + if errorToResolveBySaving != nil { + if !lockedForWriting { + return true, errorToResolveBySaving + } + return false, r.save(modifiedLocations) } - return nil + return false, nil } -func (r *containerStore) Save() error { - if !r.Locked() { - return errors.New("container store is not locked") - } - rpath := r.containerspath() - if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil { - return err +// save saves the contents of the store to disk. +// The caller must hold r.lockfile locked for writing. +// The caller must hold r.inProcessLock for reading (but usually holds it for writing in order to make the desired changes). +func (r *containerStore) save(saveLocations containerLocations) error { + r.lockfile.AssertLockedForWriting() + for locationIndex := 0; locationIndex < numContainerLocationIndex; locationIndex++ { + location := containerLocationFromIndex(locationIndex) + if location&saveLocations == 0 { + continue + } + rpath := r.jsonPath[locationIndex] + if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil { + return err + } + subsetContainers := make([]*Container, 0, len(r.containers)) + for _, container := range r.containers { + if containerLocation(container) == location { + subsetContainers = append(subsetContainers, container) + } + } + + jdata, err := json.Marshal(&subsetContainers) + if err != nil { + return err + } + var opts *ioutils.AtomicFileWriterOptions + if location == volatileContainerLocation { + opts = &ioutils.AtomicFileWriterOptions{ + NoSync: true, + } + } + if err := ioutils.AtomicWriteFileWithOpts(rpath, jdata, 0600, opts); err != nil { + return err + } } - jdata, err := json.Marshal(&r.containers) + lw, err := r.lockfile.RecordWrite() if err != nil { return err } - defer r.Touch() - return ioutils.AtomicWriteFile(rpath, jdata, 0600) + r.lastWrite = lw + return nil +} + +// saveFor saves the contents of the store relevant for modifiedContainer to disk. +// The caller must hold r.lockfile locked for writing. +// The caller must hold r.inProcessLock for reading (but usually holds it for writing in order to make the desired changes). 
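// The volatile location is written with NoSync because losing it on a crash
// is acceptable by definition, while the stable location keeps the default
// synced write. The writer call as used in save() above, shown in isolation:
//
//	opts := &ioutils.AtomicFileWriterOptions{NoSync: true} // volatile location only
//	if err := ioutils.AtomicWriteFileWithOpts(rpath, jdata, 0600, opts); err != nil {
//		return err
//	}
//	// passing opts == nil keeps the default (fsynced) behaviour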
+func (r *containerStore) saveFor(modifiedContainer *Container) error { + return r.save(containerLocation(modifiedContainer)) } -func newContainerStore(dir string) (ContainerStore, error) { +func newContainerStore(dir string, runDir string, transient bool) (rwContainerStore, error) { if err := os.MkdirAll(dir, 0700); err != nil { return nil, err } - lockfile, err := GetLockfile(filepath.Join(dir, "containers.lock")) + volatileDir := dir + if transient { + if err := os.MkdirAll(runDir, 0700); err != nil { + return nil, err + } + volatileDir = runDir + } + lockfile, err := lockfile.GetLockFile(filepath.Join(volatileDir, "containers.lock")) if err != nil { return nil, err } - lockfile.Lock() - defer lockfile.Unlock() cstore := containerStore{ - lockfile: lockfile, - dir: dir, + lockfile: lockfile, + dir: dir, + jsonPath: [numContainerLocationIndex]string{ + filepath.Join(dir, "containers.json"), + filepath.Join(volatileDir, "volatile-containers.json"), + }, + containers: []*Container{}, byid: make(map[string]*Container), bylayer: make(map[string]*Container), byname: make(map[string]*Container), } - if err := cstore.Load(); err != nil { + + if err := cstore.startWritingWithReload(false); err != nil { + return nil, err + } + cstore.lastWrite, err = cstore.lockfile.GetLastWrite() + if err != nil { + return nil, err + } + defer cstore.stopWriting() + if _, err := cstore.load(true); err != nil { return nil, err } return &cstore, nil } +// Requires startReading or startWriting. func (r *containerStore) lookup(id string) (*Container, bool) { if container, ok := r.byid[id]; ok { return container, ok @@ -278,15 +625,17 @@ func (r *containerStore) lookup(id string) (*Container, bool) { return nil, false } +// Requires startWriting. func (r *containerStore) ClearFlag(id string, flag string) error { container, ok := r.lookup(id) if !ok { return ErrContainerUnknown } delete(container.Flags, flag) - return r.Save() + return r.saveFor(container) } +// Requires startWriting. func (r *containerStore) SetFlag(id string, flag string, value interface{}) error { container, ok := r.lookup(id) if !ok { @@ -296,9 +645,10 @@ func (r *containerStore) SetFlag(id string, flag string, value interface{}) erro container.Flags = make(map[string]interface{}) } container.Flags[flag] = value - return r.Save() + return r.saveFor(container) } +// Requires startWriting. func (r *containerStore) Create(id string, names []string, image, layer, metadata string, options *ContainerOptions) (container *Container, err error) { if id == "" { id = stringid.GenerateRandomID() @@ -312,46 +662,53 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat return nil, ErrDuplicateID } if options.MountOpts != nil { - options.Flags["MountOpts"] = append([]string{}, options.MountOpts...) + options.Flags[mountOptsFlag] = append([]string{}, options.MountOpts...) } if options.Volatile { - options.Flags["Volatile"] = true + options.Flags[volatileFlag] = true } names = dedupeNames(names) for _, name := range names { if _, nameInUse := r.byname[name]; nameInUse { - return nil, errors.Wrapf(ErrDuplicateName, - fmt.Sprintf("the container name \"%s\" is already in use by \"%s\". You have to remove that container to be able to reuse that name.", name, r.byname[name].ID)) + return nil, fmt.Errorf("the container name %q is already in use by %s. 
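// In transient mode both the lock file and the volatile JSON live under
// runDir (normally a tmpfs), so a reboot discards them together while dir
// keeps only the stable records. The resulting layout, with illustrative
// paths dir=/var/lib/storage/containers and runDir=/run/storage:
//
//	/var/lib/storage/containers/containers.json   <- jsonPath[0], stable
//	/run/storage/containers.lock                  <- lock, in volatileDir
//	/run/storage/volatile-containers.json         <- jsonPath[1], volatile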
You have to remove that container to be able to reuse that name: %w", name, r.byname[name].ID, ErrDuplicateName) } } - if err == nil { - container = &Container{ - ID: id, - Names: names, - ImageID: image, - LayerID: layer, - Metadata: metadata, - BigDataNames: []string{}, - BigDataSizes: make(map[string]int64), - BigDataDigests: make(map[string]digest.Digest), - Created: time.Now().UTC(), - Flags: copyStringInterfaceMap(options.Flags), - UIDMap: copyIDMap(options.UIDMap), - GIDMap: copyIDMap(options.GIDMap), - } - r.containers = append(r.containers, container) - r.byid[id] = container - r.idindex.Add(id) - r.bylayer[layer] = container - for _, name := range names { - r.byname[name] = container - } - err = r.Save() - container = copyContainer(container) + if err := hasOverlappingRanges(options.UIDMap); err != nil { + return nil, err + } + if err := hasOverlappingRanges(options.GIDMap); err != nil { + return nil, err } + container = &Container{ + ID: id, + Names: names, + ImageID: image, + LayerID: layer, + Metadata: metadata, + BigDataNames: []string{}, + BigDataSizes: make(map[string]int64), + BigDataDigests: make(map[string]digest.Digest), + Created: time.Now().UTC(), + Flags: copyStringInterfaceMap(options.Flags), + UIDMap: copyIDMap(options.UIDMap), + GIDMap: copyIDMap(options.GIDMap), + volatileStore: options.Volatile, + } + r.containers = append(r.containers, container) + r.byid[id] = container + // This can only fail on duplicate IDs, which shouldn’t happen — and in that case the index is already in the desired state anyway. + // Implementing recovery from an unlikely and unimportant failure here would be too risky. + _ = r.idindex.Add(id) + r.bylayer[layer] = container + for _, name := range names { + r.byname[name] = container + } + err = r.saveFor(container) + container = copyContainer(container) return container, err } +// Requires startReading or startWriting. func (r *containerStore) Metadata(id string) (string, error) { if container, ok := r.lookup(id); ok { return container.Metadata, nil @@ -359,36 +716,45 @@ func (r *containerStore) Metadata(id string) (string, error) { return "", ErrContainerUnknown } +// Requires startWriting. func (r *containerStore) SetMetadata(id, metadata string) error { if container, ok := r.lookup(id); ok { container.Metadata = metadata - return r.Save() + return r.saveFor(container) } return ErrContainerUnknown } +// The caller must hold r.inProcessLock for writing. func (r *containerStore) removeName(container *Container, name string) { container.Names = stringSliceWithoutValue(container.Names, name) } -func (r *containerStore) SetNames(id string, names []string) error { - names = dedupeNames(names) - if container, ok := r.lookup(id); ok { - for _, name := range container.Names { - delete(r.byname, name) - } - for _, name := range names { - if otherContainer, ok := r.byname[name]; ok { - r.removeName(otherContainer, name) - } - r.byname[name] = container +// Requires startWriting. 
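// (On Create(), above:) hasOverlappingRanges rejects ID maps whose ranges
// collide, since such a mapping cannot be applied unambiguously. A
// hypothetical map that would be refused (values invented for illustration):
//
//	options.UIDMap = []idtools.IDMap{
//		{ContainerID: 0, HostID: 100000, Size: 65536},
//		{ContainerID: 1000, HostID: 100500, Size: 10}, // falls inside the first range
//	}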
+func (r *containerStore) updateNames(id string, names []string, op updateNameOperation) error { + container, ok := r.lookup(id) + if !ok { + return ErrContainerUnknown + } + oldNames := container.Names + names, err := applyNameOperation(oldNames, names, op) + if err != nil { + return err + } + for _, name := range oldNames { + delete(r.byname, name) + } + for _, name := range names { + if otherContainer, ok := r.byname[name]; ok { + r.removeName(otherContainer, name) } - container.Names = names - return r.Save() + r.byname[name] = container } - return ErrContainerUnknown + container.Names = names + return r.saveFor(container) } +// Requires startWriting. func (r *containerStore) Delete(id string) error { container, ok := r.lookup(id) if !ok { @@ -403,7 +769,9 @@ func (r *containerStore) Delete(id string) error { } } delete(r.byid, id) - r.idindex.Delete(id) + // This can only fail if the ID is already missing, which shouldn’t happen — and in that case the index is already in the desired state anyway. + // The store’s Delete method is used on various paths to recover from failures, so this should be robust against partially missing data. + _ = r.idindex.Delete(id) delete(r.bylayer, container.LayerID) for _, name := range container.Names { delete(r.byname, name) @@ -416,7 +784,7 @@ func (r *containerStore) Delete(id string) error { r.containers = append(r.containers[:toDeleteIndex], r.containers[toDeleteIndex+1:]...) } } - if err := r.Save(); err != nil { + if err := r.saveFor(container); err != nil { return err } if err := os.RemoveAll(r.datadir(id)); err != nil { @@ -425,6 +793,7 @@ func (r *containerStore) Delete(id string) error { return nil } +// Requires startReading or startWriting. func (r *containerStore) Get(id string) (*Container, error) { if container, ok := r.lookup(id); ok { return copyContainer(container), nil @@ -432,6 +801,7 @@ func (r *containerStore) Get(id string) (*Container, error) { return nil, ErrContainerUnknown } +// Requires startReading or startWriting. func (r *containerStore) Lookup(name string) (id string, err error) { if container, ok := r.lookup(name); ok { return container.ID, nil @@ -439,34 +809,34 @@ func (r *containerStore) Lookup(name string) (id string, err error) { return "", ErrContainerUnknown } +// Requires startReading or startWriting. func (r *containerStore) Exists(id string) bool { _, ok := r.lookup(id) return ok } +// Requires startReading or startWriting. func (r *containerStore) BigData(id, key string) ([]byte, error) { if key == "" { - return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve container big data value for empty name") + return nil, fmt.Errorf("can't retrieve container big data value for empty name: %w", ErrInvalidBigDataName) } c, ok := r.lookup(id) if !ok { return nil, ErrContainerUnknown } - return ioutil.ReadFile(r.datapath(c.ID, key)) + return os.ReadFile(r.datapath(c.ID, key)) } +// Requires startWriting. Yes, really, WRITING (see SetBigData). 
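// updateNames backs SetNames, AddNames and RemoveNames; the operation
// argument selects how the supplied names are merged with the existing ones
// before the single saveFor call. A hedged sketch of the three call shapes
// (the operation constant names are assumed from elsewhere in the package):
//
//	_ = r.updateNames(id, []string{"a", "b"}, setNames)   // replace all names
//	_ = r.updateNames(id, []string{"c"}, addNames)        // union with existing
//	_ = r.updateNames(id, []string{"a"}, removeNames)     // subtract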
func (r *containerStore) BigDataSize(id, key string) (int64, error) { if key == "" { - return -1, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve size of container big data with empty name") + return -1, fmt.Errorf("can't retrieve size of container big data with empty name: %w", ErrInvalidBigDataName) } c, ok := r.lookup(id) if !ok { return -1, ErrContainerUnknown } - if c.BigDataSizes == nil { - c.BigDataSizes = make(map[string]int64) - } - if size, ok := c.BigDataSizes[key]; ok { + if size, ok := c.BigDataSizes[key]; ok { // This is valid, and returns ok == false, for BigDataSizes == nil. return size, nil } if data, err := r.BigData(id, key); err == nil && data != nil { @@ -485,18 +855,16 @@ func (r *containerStore) BigDataSize(id, key string) (int64, error) { return -1, ErrSizeUnknown } +// Requires startWriting. Yes, really, WRITING (see SetBigData). func (r *containerStore) BigDataDigest(id, key string) (digest.Digest, error) { if key == "" { - return "", errors.Wrapf(ErrInvalidBigDataName, "can't retrieve digest of container big data value with empty name") + return "", fmt.Errorf("can't retrieve digest of container big data value with empty name: %w", ErrInvalidBigDataName) } c, ok := r.lookup(id) if !ok { return "", ErrContainerUnknown } - if c.BigDataDigests == nil { - c.BigDataDigests = make(map[string]digest.Digest) - } - if d, ok := c.BigDataDigests[key]; ok { + if d, ok := c.BigDataDigests[key]; ok { // This is valid, and returns ok == false, for BigDataSizes == nil. return d, nil } if data, err := r.BigData(id, key); err == nil && data != nil { @@ -515,6 +883,7 @@ func (r *containerStore) BigDataDigest(id, key string) (digest.Digest, error) { return "", ErrDigestUnknown } +// Requires startReading or startWriting. func (r *containerStore) BigDataNames(id string) ([]string, error) { c, ok := r.lookup(id) if !ok { @@ -523,9 +892,10 @@ func (r *containerStore) BigDataNames(id string) ([]string, error) { return copyStringSlice(c.BigDataNames), nil } +// Requires startWriting. func (r *containerStore) SetBigData(id, key string, data []byte) error { if key == "" { - return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for container big data item") + return fmt.Errorf("can't set empty name for container big data item: %w", ErrInvalidBigDataName) } c, ok := r.lookup(id) if !ok { @@ -563,12 +933,13 @@ func (r *containerStore) SetBigData(id, key string, data []byte) error { save = true } if save { - err = r.Save() + err = r.saveFor(c) } } return err } +// Requires startWriting. 
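// (On BigDataSize/BigDataDigest, above:) these are reads that can write:
// when the cached size/digest maps predate a value they recompute it through
// SetBigData, which persists the store, hence the "Yes, really, WRITING"
// notes. The backfill shape, as in the code:
//
//	if data, err := r.BigData(id, key); err == nil && data != nil {
//		if err = r.SetBigData(id, key, data); err == nil {
//			// the maps are now populated and the lookup is retried
//		}
//	}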
func (r *containerStore) Wipe() error { ids := make([]string, 0, len(r.byid)) for id := range r.byid { @@ -581,50 +952,3 @@ func (r *containerStore) Wipe() error { } return nil } - -func (r *containerStore) Lock() { - r.lockfile.Lock() -} - -func (r *containerStore) RecursiveLock() { - r.lockfile.RecursiveLock() -} - -func (r *containerStore) RLock() { - r.lockfile.RLock() -} - -func (r *containerStore) Unlock() { - r.lockfile.Unlock() -} - -func (r *containerStore) Touch() error { - return r.lockfile.Touch() -} - -func (r *containerStore) Modified() (bool, error) { - return r.lockfile.Modified() -} - -func (r *containerStore) IsReadWrite() bool { - return r.lockfile.IsReadWrite() -} - -func (r *containerStore) TouchedSince(when time.Time) bool { - return r.lockfile.TouchedSince(when) -} - -func (r *containerStore) Locked() bool { - return r.lockfile.Locked() -} - -func (r *containerStore) ReloadIfChanged() error { - r.loadMut.Lock() - defer r.loadMut.Unlock() - - modified, err := r.Modified() - if err == nil && modified { - return r.Load() - } - return err -} diff --git a/vendor/github.com/containers/storage/deprecated.go b/vendor/github.com/containers/storage/deprecated.go new file mode 100644 index 000000000..04972d838 --- /dev/null +++ b/vendor/github.com/containers/storage/deprecated.go @@ -0,0 +1,216 @@ +package storage + +import ( + "io" + "time" + + drivers "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/archive" + digest "github.com/opencontainers/go-digest" +) + +// The type definitions in this file exist ONLY to maintain formal API compatibility. +// DO NOT ADD ANY NEW METHODS TO THESE INTERFACES. + +// ROFileBasedStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type ROFileBasedStore interface { + Locker + Load() error + ReloadIfChanged() error +} + +// RWFileBasedStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type RWFileBasedStore interface { + Save() error +} + +// FileBasedStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type FileBasedStore interface { + ROFileBasedStore + RWFileBasedStore +} + +// ROMetadataStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type ROMetadataStore interface { + Metadata(id string) (string, error) +} + +// RWMetadataStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type RWMetadataStore interface { + SetMetadata(id, metadata string) error +} + +// MetadataStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. 
+type MetadataStore interface { + ROMetadataStore + RWMetadataStore +} + +// ROBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type ROBigDataStore interface { + BigData(id, key string) ([]byte, error) + BigDataSize(id, key string) (int64, error) + BigDataDigest(id, key string) (digest.Digest, error) + BigDataNames(id string) ([]string, error) +} + +// RWImageBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type RWImageBigDataStore interface { + SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error +} + +// ContainerBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type ContainerBigDataStore interface { + ROBigDataStore + SetBigData(id, key string, data []byte) error +} + +// ROLayerBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type ROLayerBigDataStore interface { + BigData(id, key string) (io.ReadCloser, error) + BigDataNames(id string) ([]string, error) +} + +// RWLayerBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type RWLayerBigDataStore interface { + SetBigData(id, key string, data io.Reader) error +} + +// LayerBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type LayerBigDataStore interface { + ROLayerBigDataStore + RWLayerBigDataStore +} + +// FlaggableStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type FlaggableStore interface { + ClearFlag(id string, flag string) error + SetFlag(id string, flag string, value interface{}) error +} + +// ContainerStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type ContainerStore interface { + FileBasedStore + MetadataStore + ContainerBigDataStore + FlaggableStore + Create(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) + SetNames(id string, names []string) error + AddNames(id string, names []string) error + RemoveNames(id string, names []string) error + Get(id string) (*Container, error) + Exists(id string) bool + Delete(id string) error + Wipe() error + Lookup(name string) (string, error) + Containers() ([]Container, error) +} + +// ROImageStore is a deprecated interface with no documented way to use it from callers outside of c/storage. 
+// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type ROImageStore interface { + ROFileBasedStore + ROMetadataStore + ROBigDataStore + Exists(id string) bool + Get(id string) (*Image, error) + Lookup(name string) (string, error) + Images() ([]Image, error) + ByDigest(d digest.Digest) ([]*Image, error) +} + +// ImageStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type ImageStore interface { + ROImageStore + RWFileBasedStore + RWMetadataStore + RWImageBigDataStore + FlaggableStore + Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (*Image, error) + SetNames(id string, names []string) error + AddNames(id string, names []string) error + RemoveNames(id string, names []string) error + Delete(id string) error + Wipe() error +} + +// ROLayerStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. +type ROLayerStore interface { + ROFileBasedStore + ROMetadataStore + ROLayerBigDataStore + Exists(id string) bool + Get(id string) (*Layer, error) + Status() ([][2]string, error) + Changes(from, to string) ([]archive.Change, error) + Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) + DiffSize(from, to string) (int64, error) + Size(name string) (int64, error) + Lookup(name string) (string, error) + LayersByCompressedDigest(d digest.Digest) ([]Layer, error) + LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) + Layers() ([]Layer, error) +} + +// LayerStore is a deprecated interface with no documented way to use it from callers outside of c/storage. +// +// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. 
+type LayerStore interface { + ROLayerStore + RWFileBasedStore + RWMetadataStore + FlaggableStore + RWLayerBigDataStore + Create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool) (*Layer, error) + CreateWithFlags(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}) (layer *Layer, err error) + Put(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error) + SetNames(id string, names []string) error + AddNames(id string, names []string) error + RemoveNames(id string, names []string) error + Delete(id string) error + Wipe() error + Mount(id string, options drivers.MountOpts) (string, error) + Unmount(id string, force bool) (bool, error) + Mounted(id string) (int, error) + ParentOwners(id string) (uids, gids []int, err error) + ApplyDiff(to string, diff io.Reader) (int64, error) + ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) + CleanupStagingDirectory(stagingDirectory string) error + ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error + DifferTarget(id string) (string, error) + LoadLocked() error + PutAdditionalLayer(id string, parentLayer *Layer, names []string, aLayer drivers.AdditionalLayer) (layer *Layer, err error) +} diff --git a/vendor/github.com/containers/storage/drivers/aufs/aufs.go b/vendor/github.com/containers/storage/drivers/aufs/aufs.go index afbd23a75..10341d41a 100644 --- a/vendor/github.com/containers/storage/drivers/aufs/aufs.go +++ b/vendor/github.com/containers/storage/drivers/aufs/aufs.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -24,9 +25,10 @@ package aufs import ( "bufio" + "errors" "fmt" "io" - "io/ioutil" + "io/fs" "os" "os/exec" "path" @@ -44,9 +46,8 @@ import ( mountpk "github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/parsers" "github.com/containers/storage/pkg/system" - rsystem "github.com/opencontainers/runc/libcontainer/system" + "github.com/containers/storage/pkg/unshare" "github.com/opencontainers/selinux/go-selinux/label" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/vbatts/tar-split/tar/storage" "golang.org/x/sys/unix" @@ -54,9 +55,9 @@ import ( var ( // ErrAufsNotSupported is returned if aufs is not supported by the host. - ErrAufsNotSupported = fmt.Errorf("AUFS was not found in /proc/filesystems") + ErrAufsNotSupported = fmt.Errorf("aufs was not found in /proc/filesystems") // ErrAufsNested means aufs cannot be used bc we are in a user namespace - ErrAufsNested = fmt.Errorf("AUFS cannot be used in non-init user namespace") + ErrAufsNested = fmt.Errorf("aufs cannot be used in non-init user namespace") backingFs = "" enableDirpermLock sync.Once @@ -66,7 +67,7 @@ var ( const defaultPerms = os.FileMode(0555) func init() { - graphdriver.Register("aufs", Init) + graphdriver.MustRegister("aufs", Init) } // Driver contains information about the filesystem mounted. 
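// (On the Register -> MustRegister change above:) registration can only fail
// from a programming error such as a duplicate driver name, so panicking in
// init() is preferable to silently losing a driver. The presumable pattern,
// hedged:
//
//	func init() {
//		graphdriver.MustRegister("aufs", Init) // panics on a failed registration
//	}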
@@ -89,7 +90,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) // Try to load the aufs kernel module if err := supportsAufs(); err != nil { - return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel does not support aufs") + return nil, fmt.Errorf("kernel does not support aufs: %w", graphdriver.ErrNotSupported) } @@ -104,7 +105,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) switch fsMagic { case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicEcryptfs: logrus.Errorf("AUFS is not supported over %s", backingFs) - return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "AUFS is not supported over %q", backingFs) + return nil, fmt.Errorf("aufs is not supported over %q: %w", backingFs, graphdriver.ErrIncompatibleFS) } var mountOptions string @@ -168,7 +169,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) for _, path := range []string{"mnt", "diff"} { p := filepath.Join(home, path) - entries, err := ioutil.ReadDir(p) + entries, err := os.ReadDir(p) if err != nil { logger.WithError(err).WithField("dir", p).Error("error reading dir entries") continue @@ -198,7 +199,7 @@ func supportsAufs() error { // proc/filesystems for when aufs is supported exec.Command("modprobe", "aufs").Run() - if rsystem.RunningInUserNS() { + if unshare.IsRootless() { return ErrAufsNested } @@ -250,6 +251,11 @@ func (a *Driver) Exists(id string) bool { return true } +// List layers (not including additional image stores) +func (a *Driver) ListLayers() ([]string, error) { + return nil, graphdriver.ErrNotSupported +} + // AdditionalImageStores returns additional image stores supported by the driver func (a *Driver) AdditionalImageStores() []string { return nil @@ -372,10 +378,10 @@ func (a *Driver) Remove(id string) error { } if err != unix.EBUSY { - return errors.Wrapf(err, "aufs: unmount error: %s", mountpoint) + return fmt.Errorf("aufs: unmount error: %s: %w", mountpoint, err) } if retries >= 5 { - return errors.Wrapf(err, "aufs: unmount error after retries: %s", mountpoint) + return fmt.Errorf("aufs: unmount error after retries: %s: %w", mountpoint, err) } // If unmount returns EBUSY, it could be a transient error. Sleep and retry. retries++ @@ -385,21 +391,21 @@ func (a *Driver) Remove(id string) error { // Remove the layers file for the id if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) { - return errors.Wrapf(err, "error removing layers dir for %s", id) + return fmt.Errorf("removing layers dir for %s: %w", id, err) } if err := atomicRemove(a.getDiffPath(id)); err != nil { - return errors.Wrapf(err, "could not remove diff path for id %s", id) + return fmt.Errorf("could not remove diff path for id %s: %w", id, err) } // Atomically remove each directory in turn by first moving it out of the // way (so that container runtime doesn't find it anymore) before doing removal of // the whole tree. 
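// The unmount loop in Remove() above treats EBUSY as transient: retry a
// bounded number of times, then give up with a wrapped error. The same shape
// in isolation (retry count from the code; the unmount helper and the sleep
// interval are stand-ins):
//
//	for retries := 0; ; retries++ {
//		err := unmount(mountpoint) // stand-in for the driver's unmount call
//		if err == nil {
//			break
//		}
//		if err != unix.EBUSY {
//			return fmt.Errorf("aufs: unmount error: %s: %w", mountpoint, err)
//		}
//		if retries >= 5 {
//			return fmt.Errorf("aufs: unmount error after retries: %s: %w", mountpoint, err)
//		}
//		time.Sleep(100 * time.Millisecond) // interval illustrative
//	}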
if err := atomicRemove(mountpoint); err != nil { - if errors.Cause(err) == unix.EBUSY { + if errors.Is(err, unix.EBUSY) { logger.WithField("dir", mountpoint).WithError(err).Warn("error performing atomic remove due to EBUSY") } - return errors.Wrapf(err, "could not remove mountpoint for id %s", id) + return fmt.Errorf("could not remove mountpoint for id %s: %w", id, err) } a.pathCacheLock.Lock() @@ -417,10 +423,10 @@ func atomicRemove(source string) error { case os.IsExist(err): // Got error saying the target dir already exists, maybe the source doesn't exist due to a previous (failed) remove if _, e := os.Stat(source); !os.IsNotExist(e) { - return errors.Wrapf(err, "target rename dir '%s' exists but should not, this needs to be manually cleaned up", target) + return fmt.Errorf("target rename dir '%s' exists but should not, this needs to be manually cleaned up: %w", target, err) } default: - return errors.Wrapf(err, "error preparing atomic delete") + return fmt.Errorf("preparing atomic delete: %w", err) } return system.EnsureRemoveAll(target) @@ -624,7 +630,7 @@ func (a *Driver) mount(id string, target string, layers []string, options graphd rw := a.getDiffPath(id) if err := a.aufsMount(layers, rw, target, options); err != nil { - return fmt.Errorf("error creating aufs mount to %s: %v", target, err) + return fmt.Errorf("creating aufs mount to %s: %w", target, err) } return nil } @@ -649,11 +655,11 @@ func (a *Driver) mounted(mountpoint string) (bool, error) { // Cleanup aufs and unmount all mountpoints func (a *Driver) Cleanup() error { var dirs []string - if err := filepath.Walk(a.mntPath(), func(path string, info os.FileInfo, err error) error { + if err := filepath.WalkDir(a.mntPath(), func(path string, d fs.DirEntry, err error) error { if err != nil { return err } - if !info.IsDir() { + if !d.IsDir() { return nil } dirs = append(dirs, path) @@ -728,16 +734,16 @@ func (a *Driver) aufsMount(ro []string, rw, target string, options graphdriver.M // version of aufs. 
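// (On the filepath.Walk -> filepath.WalkDir change in Cleanup(), above:)
// WalkDir passes a fs.DirEntry instead of an os.FileInfo, skipping the
// per-entry lstat that Walk performs, so directory-only scans get cheaper.
// The migrated shape:
//
//	err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
//		if err != nil {
//			return err
//		}
//		if !d.IsDir() {
//			return nil
//		}
//		dirs = append(dirs, path)
//		return nil
//	})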
func useDirperm() bool { enableDirpermLock.Do(func() { - base, err := ioutil.TempDir("", "storage-aufs-base") + base, err := os.MkdirTemp("", "storage-aufs-base") if err != nil { - logrus.Errorf("error checking dirperm1: %v", err) + logrus.Errorf("Checking dirperm1: %v", err) return } defer os.RemoveAll(base) - union, err := ioutil.TempDir("", "storage-aufs-union") + union, err := os.MkdirTemp("", "storage-aufs-union") if err != nil { - logrus.Errorf("error checking dirperm1: %v", err) + logrus.Errorf("Checking dirperm1: %v", err) return } defer os.RemoveAll(union) @@ -748,7 +754,7 @@ func useDirperm() bool { } enableDirperm = true if err := Unmount(union); err != nil { - logrus.Errorf("error checking dirperm1: failed to unmount %v", err) + logrus.Errorf("Checking dirperm1: failed to unmount %v", err) } }) return enableDirperm diff --git a/vendor/github.com/containers/storage/drivers/aufs/dirs.go b/vendor/github.com/containers/storage/drivers/aufs/dirs.go index d2325fc46..27e621633 100644 --- a/vendor/github.com/containers/storage/drivers/aufs/dirs.go +++ b/vendor/github.com/containers/storage/drivers/aufs/dirs.go @@ -1,17 +1,17 @@ +//go:build linux // +build linux package aufs import ( "bufio" - "io/ioutil" "os" "path" ) // Return all the directories func loadIds(root string) ([]string, error) { - dirs, err := ioutil.ReadDir(root) + dirs, err := os.ReadDir(root) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/storage/drivers/aufs/mount.go b/vendor/github.com/containers/storage/drivers/aufs/mount.go index 100e7537a..156f4a4f0 100644 --- a/vendor/github.com/containers/storage/drivers/aufs/mount.go +++ b/vendor/github.com/containers/storage/drivers/aufs/mount.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package aufs diff --git a/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go b/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go deleted file mode 100644 index d030b0663..000000000 --- a/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !linux - -package aufs - -import "errors" - -// MsRemount declared to specify a non-linux system mount. -const MsRemount = 0 - -func mount(source string, target string, fstype string, flags uintptr, data string) (err error) { - return errors.New("mount is not implemented on this platform") -} diff --git a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go index 3903b1ddd..3d9053297 100644 --- a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go +++ b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go @@ -1,3 +1,4 @@ +//go:build linux && cgo // +build linux,cgo package btrfs @@ -5,6 +6,9 @@ package btrfs /* #include #include + +// keep struct field name compatible with btrfs-progs < 6.1. 
+#define max_referenced max_rfer #include #include @@ -16,7 +20,7 @@ import "C" import ( "fmt" - "io/ioutil" + "io/fs" "math" "os" "path" @@ -34,7 +38,6 @@ import ( "github.com/containers/storage/pkg/system" "github.com/docker/go-units" "github.com/opencontainers/selinux/go-selinux/label" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -42,7 +45,7 @@ import ( const defaultPerms = os.FileMode(0555) func init() { - graphdriver.Register("btrfs", Init) + graphdriver.MustRegister("btrfs", Init) } type btrfsOptions struct { @@ -60,7 +63,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) } if fsMagic != graphdriver.FsMagicBtrfs { - return nil, errors.Wrapf(graphdriver.ErrPrerequisites, "%q is not on a btrfs filesystem", home) + return nil, fmt.Errorf("%q is not on a btrfs filesystem: %w", home, graphdriver.ErrPrerequisites) } rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) @@ -116,7 +119,7 @@ func parseOptions(opt []string) (btrfsOptions, bool, error) { case "btrfs.mountopt": return options, userDiskQuota, fmt.Errorf("btrfs driver does not support mount options") default: - return options, userDiskQuota, fmt.Errorf("Unknown option %s", key) + return options, userDiskQuota, fmt.Errorf("unknown option %s", key) } } return options, userDiskQuota, nil @@ -172,7 +175,7 @@ func openDir(path string) (*C.DIR, error) { dir := C.opendir(Cpath) if dir == nil { - return nil, fmt.Errorf("Can't open dir") + return nil, fmt.Errorf("can't open dir %s", path) } return dir, nil } @@ -202,7 +205,7 @@ func subvolCreate(path, name string) error { _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE, uintptr(unsafe.Pointer(&args))) if errno != 0 { - return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error()) + return fmt.Errorf("failed to create btrfs subvolume: %w", errno) } return nil } @@ -230,7 +233,7 @@ func subvolSnapshot(src, dest, name string) error { _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2, uintptr(unsafe.Pointer(&args))) if errno != 0 { - return fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error()) + return fmt.Errorf("failed to create btrfs snapshot: %w", errno) } return nil } @@ -256,32 +259,32 @@ func subvolDelete(dirpath, name string, quotaEnabled bool) error { var args C.struct_btrfs_ioctl_vol_args // walk the btrfs subvolumes - walkSubvolumes := func(p string, f os.FileInfo, err error) error { + walkSubvolumes := func(p string, d fs.DirEntry, err error) error { if err != nil { if os.IsNotExist(err) && p != fullPath { // missing most likely because the path was a subvolume that got removed in the previous iteration // since it's gone anyway, we don't care return nil } - return fmt.Errorf("error walking subvolumes: %v", err) + return fmt.Errorf("walking subvolumes: %w", err) } // we want to check children only so skip itself // it will be removed after the filepath walk anyways - if f.IsDir() && p != fullPath { + if d.IsDir() && p != fullPath { sv, err := isSubvolume(p) if err != nil { - return fmt.Errorf("Failed to test if %s is a btrfs subvolume: %v", p, err) + return fmt.Errorf("failed to test if %s is a btrfs subvolume: %w", p, err) } if sv { - if err := subvolDelete(path.Dir(p), f.Name(), quotaEnabled); err != nil { - return fmt.Errorf("Failed to destroy btrfs child subvolume (%s) of parent (%s): %v", p, dirpath, err) + if err := subvolDelete(path.Dir(p), d.Name(), quotaEnabled); err != nil { + 
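// (On the errno handling in these ioctl wrappers:) unix.Errno satisfies the
// error interface, so the conversions from "%v", errno.Error() to "%w", errno
// keep the message while making the errno matchable by callers. A hedged
// sketch:
//
//	if errno != 0 {
//		err := fmt.Errorf("failed to limit qgroup for %s: %w", dir, errno)
//		// errors.Is(err, unix.ENOTTY) now works, e.g. to detect an
//		// unsupported ioctl on this filesystem
//	}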
return fmt.Errorf("failed to destroy btrfs child subvolume (%s) of parent (%s): %w", p, dirpath, err) } } } return nil } - if err := filepath.Walk(path.Join(dirpath, name), walkSubvolumes); err != nil { - return fmt.Errorf("Recursively walking subvolumes for %s failed: %v", dirpath, err) + if err := filepath.WalkDir(path.Join(dirpath, name), walkSubvolumes); err != nil { + return fmt.Errorf("recursively walking subvolumes for %s failed: %w", dirpath, err) } if quotaEnabled { @@ -307,7 +310,7 @@ func subvolDelete(dirpath, name string, quotaEnabled bool) error { _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY, uintptr(unsafe.Pointer(&args))) if errno != 0 { - return fmt.Errorf("Failed to destroy btrfs snapshot %s for %s: %v", dirpath, name, errno.Error()) + return fmt.Errorf("failed to destroy btrfs snapshot %s for %s: %w", dirpath, name, errno) } return nil } @@ -343,7 +346,7 @@ func (d *Driver) enableQuota() error { _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL, uintptr(unsafe.Pointer(&args))) if errno != 0 { - return fmt.Errorf("Failed to enable btrfs quota for %s: %v", dir, errno.Error()) + return fmt.Errorf("failed to enable btrfs quota for %s: %w", dir, errno) } d.quotaEnabled = true @@ -368,7 +371,7 @@ func (d *Driver) subvolRescanQuota() error { _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_RESCAN_WAIT, uintptr(unsafe.Pointer(&args))) if errno != 0 { - return fmt.Errorf("Failed to rescan btrfs quota for %s: %v", dir, errno.Error()) + return fmt.Errorf("failed to rescan btrfs quota for %s: %w", dir, errno) } return nil @@ -382,12 +385,12 @@ func subvolLimitQgroup(path string, size uint64) error { defer closeDir(dir) var args C.struct_btrfs_ioctl_qgroup_limit_args - args.lim.max_referenced = C.__u64(size) + args.lim.max_rfer = C.__u64(size) args.lim.flags = C.BTRFS_QGROUP_LIMIT_MAX_RFER _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_LIMIT, uintptr(unsafe.Pointer(&args))) if errno != 0 { - return fmt.Errorf("Failed to limit qgroup for %s: %v", dir, errno.Error()) + return fmt.Errorf("failed to limit qgroup for %s: %w", dir, errno) } return nil @@ -416,11 +419,11 @@ func qgroupStatus(path string) error { _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_TREE_SEARCH, uintptr(unsafe.Pointer(&args))) if errno != 0 { - return fmt.Errorf("Failed to search qgroup for %s: %v", path, errno.Error()) + return fmt.Errorf("failed to search qgroup for %s: %w", path, errno) } sh := (*C.struct_btrfs_ioctl_search_header)(unsafe.Pointer(&args.buf)) if sh._type != C.BTRFS_QGROUP_STATUS_KEY { - return fmt.Errorf("Invalid qgroup search header type for %s: %v", path, sh._type) + return fmt.Errorf("invalid qgroup search header type for %s: %v", path, sh._type) } return nil } @@ -438,10 +441,10 @@ func subvolLookupQgroup(path string) (uint64, error) { _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_INO_LOOKUP, uintptr(unsafe.Pointer(&args))) if errno != 0 { - return 0, fmt.Errorf("Failed to lookup qgroup for %s: %v", dir, errno.Error()) + return 0, fmt.Errorf("failed to lookup qgroup for %s: %w", dir, errno) } if args.treeid == 0 { - return 0, fmt.Errorf("Invalid qgroup id for %s: 0", dir) + return 0, fmt.Errorf("invalid qgroup id for %s: 0", dir) } return uint64(args.treeid), nil @@ -523,7 +526,7 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { if err := idtools.MkdirAllAs(quotas, 0700, rootUID, 
rootGID); err != nil { return err } - if err := ioutil.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0644); err != nil { + if err := os.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0644); err != nil { return err } } @@ -557,7 +560,7 @@ func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) e } driver.options.size = uint64(size) default: - return fmt.Errorf("Unknown option %s", key) + return fmt.Errorf("unknown option %s", key) } } @@ -642,7 +645,7 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { return "", fmt.Errorf("%s: not a directory", dir) } - if quota, err := ioutil.ReadFile(d.quotasDirID(id)); err == nil { + if quota, err := os.ReadFile(d.quotasDirID(id)); err == nil { if size, err := strconv.ParseUint(string(quota), 10, 64); err == nil && size >= d.options.minSpace { if err := d.enableQuota(); err != nil { return "", err @@ -676,6 +679,11 @@ func (d *Driver) Exists(id string) bool { return err == nil } +// List layers (not including additional image stores) +func (d *Driver) ListLayers() ([]string, error) { + return nil, graphdriver.ErrNotSupported +} + // AdditionalImageStores returns additional image stores supported by the driver func (d *Driver) AdditionalImageStores() []string { return nil diff --git a/vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go b/vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go index f07088887..c7d9d3b84 100644 --- a/vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go +++ b/vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux || !cgo // +build !linux !cgo package btrfs diff --git a/vendor/github.com/containers/storage/drivers/btrfs/version.go b/vendor/github.com/containers/storage/drivers/btrfs/version.go index edd8bdab8..5816139f3 100644 --- a/vendor/github.com/containers/storage/drivers/btrfs/version.go +++ b/vendor/github.com/containers/storage/drivers/btrfs/version.go @@ -1,3 +1,4 @@ +//go:build linux && !btrfs_noversion && cgo // +build linux,!btrfs_noversion,cgo package btrfs diff --git a/vendor/github.com/containers/storage/drivers/btrfs/version_none.go b/vendor/github.com/containers/storage/drivers/btrfs/version_none.go index 905e834e3..a61d8fbd9 100644 --- a/vendor/github.com/containers/storage/drivers/btrfs/version_none.go +++ b/vendor/github.com/containers/storage/drivers/btrfs/version_none.go @@ -1,4 +1,5 @@ -// +build !linux btrfs_noversion !cgo +//go:build linux && btrfs_noversion && cgo +// +build linux,btrfs_noversion,cgo package btrfs diff --git a/vendor/github.com/containers/storage/drivers/chown.go b/vendor/github.com/containers/storage/drivers/chown.go index 63bfd2d13..ca43c3f05 100644 --- a/vendor/github.com/containers/storage/drivers/chown.go +++ b/vendor/github.com/containers/storage/drivers/chown.go @@ -2,6 +2,7 @@ package graphdriver import ( "bytes" + "errors" "fmt" "os" @@ -50,11 +51,14 @@ func chownByMapsMain() { if len(toHost.UIDs()) == 0 && len(toHost.GIDs()) == 0 { toHost = nil } + + chowner := newLChowner() + chown := func(path string, info os.FileInfo, _ error) error { if path == "." 
{ return nil } - return platformLChown(path, info, toHost, toContainer) + return chowner.LChown(path, info, toHost, toContainer) } if err := pwalk.Walk(".", chown); err != nil { fmt.Fprintf(os.Stderr, "error during chown: %v", err) @@ -84,13 +88,13 @@ func ChownPathByMaps(path string, toContainer, toHost *idtools.IDMappings) error cmd.Stdin = bytes.NewReader(config) output, err := cmd.CombinedOutput() if len(output) > 0 && err != nil { - return fmt.Errorf("%v: %s", err, string(output)) + return fmt.Errorf("%s: %w", string(output), err) } if err != nil { return err } if len(output) > 0 { - return fmt.Errorf("%s", string(output)) + return errors.New(string(output)) } return nil @@ -111,7 +115,7 @@ func NewNaiveLayerIDMapUpdater(driver ProtoDriver) LayerIDMapUpdater { // on-disk owner UIDs and GIDs which are "host" values in the first map with // UIDs and GIDs for "host" values from the second map which correspond to the // same "container" IDs. -func (n *naiveLayerIDMapUpdater) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error { +func (n *naiveLayerIDMapUpdater) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) (retErr error) { driver := n.ProtoDriver options := MountOpts{ MountLabel: mountLabel, @@ -120,9 +124,7 @@ func (n *naiveLayerIDMapUpdater) UpdateLayerIDMap(id string, toContainer, toHost if err != nil { return err } - defer func() { - driver.Put(id) - }() + defer driverPut(driver, id, &retErr) return ChownPathByMaps(layerFs, toContainer, toHost) } diff --git a/vendor/github.com/containers/storage/drivers/chown_darwin.go b/vendor/github.com/containers/storage/drivers/chown_darwin.go new file mode 100644 index 000000000..d6150ceee --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/chown_darwin.go @@ -0,0 +1,109 @@ +//go:build darwin +// +build darwin + +package graphdriver + +import ( + "errors" + "fmt" + "os" + "sync" + "syscall" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" +) + +type inode struct { + Dev uint64 + Ino uint64 +} + +type platformChowner struct { + mutex sync.Mutex + inodes map[inode]bool +} + +func newLChowner() *platformChowner { + return &platformChowner{ + inodes: make(map[inode]bool), + } +} + +func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error { + st, ok := info.Sys().(*syscall.Stat_t) + if !ok { + return nil + } + + i := inode{ + Dev: uint64(st.Dev), + Ino: uint64(st.Ino), + } + c.mutex.Lock() + _, found := c.inodes[i] + if !found { + c.inodes[i] = true + } + c.mutex.Unlock() + + if found { + return nil + } + + // Map an on-disk UID/GID pair from host to container + // using the first map, then back to the host using the + // second map. Skip that first step if they're 0, to + // compensate for cases where a parent layer should + // have had a mapped value, but didn't. 
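// (On chownByMapsMain, above:) pwalk.Walk runs its callback from multiple
// goroutines, which is why the single chowner shared by every callback
// guards its inode bookkeeping with a mutex. A hedged sketch of the wiring:
//
//	chowner := newLChowner()
//	err := pwalk.Walk(".", func(path string, info os.FileInfo, _ error) error {
//		if path == "." {
//			return nil
//		}
//		return chowner.LChown(path, info, toHost, toContainer)
//	})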
+ uid, gid := int(st.Uid), int(st.Gid) + if toContainer != nil { + pair := idtools.IDPair{ + UID: uid, + GID: gid, + } + mappedUID, mappedGID, err := toContainer.ToContainer(pair) + if err != nil { + if (uid != 0) || (gid != 0) { + return fmt.Errorf("mapping host ID pair %#v for %q to container: %w", pair, path, err) + } + mappedUID, mappedGID = uid, gid + } + uid, gid = mappedUID, mappedGID + } + if toHost != nil { + pair := idtools.IDPair{ + UID: uid, + GID: gid, + } + mappedPair, err := toHost.ToHostOverflow(pair) + if err != nil { + return fmt.Errorf("mapping container ID pair %#v for %q to host: %w", pair, path, err) + } + uid, gid = mappedPair.UID, mappedPair.GID + } + if uid != int(st.Uid) || gid != int(st.Gid) { + capability, err := system.Lgetxattr(path, "security.capability") + if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform { + return fmt.Errorf("%s: %w", os.Args[0], err) + } + + // Make the change. + if err := system.Lchown(path, uid, gid); err != nil { + return fmt.Errorf("%s: %w", os.Args[0], err) + } + // Restore the SUID and SGID bits if they were originally set. + if (info.Mode()&os.ModeSymlink == 0) && info.Mode()&(os.ModeSetuid|os.ModeSetgid) != 0 { + if err := system.Chmod(path, info.Mode()); err != nil { + return fmt.Errorf("%s: %w", os.Args[0], err) + } + } + if capability != nil { + if err := system.Lsetxattr(path, "security.capability", capability, 0); err != nil { + return fmt.Errorf("%s: %w", os.Args[0], err) + } + } + + } + return nil +} diff --git a/vendor/github.com/containers/storage/drivers/chown_unix.go b/vendor/github.com/containers/storage/drivers/chown_unix.go index 0387adfc1..42c12c627 100644 --- a/vendor/github.com/containers/storage/drivers/chown_unix.go +++ b/vendor/github.com/containers/storage/drivers/chown_unix.go @@ -1,4 +1,5 @@ -// +build !windows +//go:build !windows && !darwin +// +build !windows,!darwin package graphdriver @@ -6,17 +7,68 @@ import ( "errors" "fmt" "os" + "sync" "syscall" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/system" ) -func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error { +type inode struct { + Dev uint64 + Ino uint64 +} + +type platformChowner struct { + mutex sync.Mutex + inodes map[inode]string +} + +func newLChowner() *platformChowner { + return &platformChowner{ + inodes: make(map[inode]string), + } +} + +func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error { st, ok := info.Sys().(*syscall.Stat_t) if !ok { return nil } + + i := inode{ + Dev: uint64(st.Dev), + Ino: uint64(st.Ino), + } + + c.mutex.Lock() + + oldTarget, found := c.inodes[i] + if !found { + c.inodes[i] = path + } + + // If we are dealing with a file with multiple links then keep the lock until the file is + // chowned to avoid a race where we link to the old version if the file is copied up. + if found || st.Nlink > 1 { + defer c.mutex.Unlock() + } else { + c.mutex.Unlock() + } + + if found { + // If the dev/inode was already chowned then create a link to the old target instead + // of chowning it again. This is necessary when the underlying file system breaks + // inodes on copy-up (as it is with overlay with index=off) to maintain the original + // link and correct file ownership. + + // The target already exists so remove it before creating the link to the new target. 
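// Keying on (device, inode) is how hard links are detected here: the second
// path that resolves to an already-processed inode is re-linked to the first
// path instead of being chowned again, which preserves the link when the
// filesystem breaks inodes on copy-up (overlay with index=off). The
// bookkeeping in miniature:
//
//	key := inode{Dev: uint64(st.Dev), Ino: uint64(st.Ino)}
//	if first, seen := inodes[key]; seen {
//		_ = os.Remove(path)         // drop the duplicated copy...
//		return os.Link(first, path) // ...and restore the hard link
//	}
//	inodes[key] = path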
+ if err := os.Remove(path); err != nil { + return err + } + return os.Link(oldTarget, path) + } + // Map an on-disk UID/GID pair from host to container // using the first map, then back to the host using the // second map. Skip that first step if they're 0, to @@ -31,7 +83,7 @@ func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools. mappedUID, mappedGID, err := toContainer.ToContainer(pair) if err != nil { if (uid != 0) || (gid != 0) { - return fmt.Errorf("error mapping host ID pair %#v for %q to container: %v", pair, path, err) + return fmt.Errorf("mapping host ID pair %#v for %q to container: %w", pair, path, err) } mappedUID, mappedGID = uid, gid } @@ -42,31 +94,31 @@ func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools. UID: uid, GID: gid, } - mappedPair, err := toHost.ToHost(pair) + mappedPair, err := toHost.ToHostOverflow(pair) if err != nil { - return fmt.Errorf("error mapping container ID pair %#v for %q to host: %v", pair, path, err) + return fmt.Errorf("mapping container ID pair %#v for %q to host: %w", pair, path, err) } uid, gid = mappedPair.UID, mappedPair.GID } if uid != int(st.Uid) || gid != int(st.Gid) { cap, err := system.Lgetxattr(path, "security.capability") - if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform { - return fmt.Errorf("%s: %v", os.Args[0], err) + if err != nil && !errors.Is(err, system.EOPNOTSUPP) && !errors.Is(err, system.EOVERFLOW) && err != system.ErrNotSupportedPlatform { + return fmt.Errorf("%s: %w", os.Args[0], err) } // Make the change. if err := system.Lchown(path, uid, gid); err != nil { - return fmt.Errorf("%s: %v", os.Args[0], err) + return fmt.Errorf("%s: %w", os.Args[0], err) } // Restore the SUID and SGID bits if they were originally set. 
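// (On the ToHost -> ToHostOverflow change above:) the overflow variant, as
// its name suggests, presumably maps IDs outside the configured ranges to
// the kernel overflow IDs (typically 65534) instead of failing, and
// EOVERFLOW from lgetxattr is now tolerated for the same reason. Hedged call
// shape:
//
//	mappedPair, err := toHost.ToHostOverflow(idtools.IDPair{UID: uid, GID: gid})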
if (info.Mode()&os.ModeSymlink == 0) && info.Mode()&(os.ModeSetuid|os.ModeSetgid) != 0 { if err := system.Chmod(path, info.Mode()); err != nil { - return fmt.Errorf("%s: %v", os.Args[0], err) + return fmt.Errorf("%s: %w", os.Args[0], err) } } if cap != nil { if err := system.Lsetxattr(path, "security.capability", cap, 0); err != nil { - return fmt.Errorf("%s: %v", os.Args[0], err) + return fmt.Errorf("%s: %w", os.Args[0], err) } } diff --git a/vendor/github.com/containers/storage/drivers/chown_windows.go b/vendor/github.com/containers/storage/drivers/chown_windows.go index 31bd5bb52..1845a4e08 100644 --- a/vendor/github.com/containers/storage/drivers/chown_windows.go +++ b/vendor/github.com/containers/storage/drivers/chown_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package graphdriver @@ -9,6 +10,13 @@ import ( "github.com/containers/storage/pkg/idtools" ) -func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error { +type platformChowner struct { +} + +func newLChowner() *platformChowner { + return &platformChowner{} +} + +func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error { return &os.PathError{"lchown", path, syscall.EWINDOWS} } diff --git a/vendor/github.com/containers/storage/drivers/chroot_unix.go b/vendor/github.com/containers/storage/drivers/chroot_unix.go index c8c4905bf..9a1c6751f 100644 --- a/vendor/github.com/containers/storage/drivers/chroot_unix.go +++ b/vendor/github.com/containers/storage/drivers/chroot_unix.go @@ -1,3 +1,4 @@ +//go:build linux || darwin || freebsd || solaris // +build linux darwin freebsd solaris package graphdriver @@ -12,10 +13,10 @@ import ( // specified path followed by chdir() to the new root directory func chrootOrChdir(path string) error { if err := syscall.Chroot(path); err != nil { - return fmt.Errorf("error chrooting to %q: %v", path, err) + return fmt.Errorf("chrooting to %q: %w", path, err) } if err := syscall.Chdir(string(os.PathSeparator)); err != nil { - return fmt.Errorf("error changing to %q: %v", path, err) + return fmt.Errorf("changing to %q: %w", path, err) } return nil } diff --git a/vendor/github.com/containers/storage/drivers/chroot_windows.go b/vendor/github.com/containers/storage/drivers/chroot_windows.go index f4dc22a96..d5eb5ed98 100644 --- a/vendor/github.com/containers/storage/drivers/chroot_windows.go +++ b/vendor/github.com/containers/storage/drivers/chroot_windows.go @@ -9,7 +9,7 @@ import ( // specified path followed by chdir() to the new root directory func chrootOrChdir(path string) error { if err := syscall.Chdir(path); err != nil { - return fmt.Errorf("error changing to %q: %v", path, err) + return fmt.Errorf("changing to %q: %w", path, err) } return nil } diff --git a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go index c2156861f..58c0b5daf 100644 --- a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go +++ b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go @@ -1,3 +1,4 @@ +//go:build cgo // +build cgo package copy @@ -26,7 +27,6 @@ import ( "github.com/containers/storage/pkg/pools" "github.com/containers/storage/pkg/system" "github.com/containers/storage/pkg/unshare" - rsystem "github.com/opencontainers/runc/libcontainer/system" "golang.org/x/sys/unix" ) @@ -154,7 +154,7 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error { dstPath := filepath.Join(dstDir, relPath) 
stat, ok := f.Sys().(*syscall.Stat_t) if !ok { - return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath) + return fmt.Errorf("unable to get raw syscall.Stat_t data for %s", srcPath) } isHardlink := false @@ -206,7 +206,7 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error { s.Close() case mode&os.ModeDevice != 0: - if rsystem.RunningInUserNS() { + if unshare.IsRootless() { // cannot create a device if running in user namespace return nil } diff --git a/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go b/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go index e97523c35..470e6a724 100644 --- a/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go +++ b/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go @@ -1,6 +1,7 @@ +//go:build !linux || !cgo // +build !linux !cgo -package copy +package copy //nolint: predeclared import ( "io" @@ -24,7 +25,7 @@ func DirCopy(srcDir, dstDir string, _ Mode, _ bool) error { } // CopyRegularToFile copies the content of a file to another -func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { +func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { //nolint: revive // "func name will be used as copy.CopyRegularToFile by other packages, and that stutters" f, err := os.Open(srcPath) if err != nil { return err @@ -35,6 +36,6 @@ func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, c } // CopyRegular copies the content of a file to another -func CopyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { +func CopyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { //nolint:revive // "func name will be used as copy.CopyRegular by other packages, and that stutters" return chrootarchive.NewArchiver(nil).CopyWithTar(srcPath, dstPath) } diff --git a/vendor/github.com/containers/storage/drivers/counter.go b/vendor/github.com/containers/storage/drivers/counter.go index 3fc45495b..015766676 100644 --- a/vendor/github.com/containers/storage/drivers/counter.go +++ b/vendor/github.com/containers/storage/drivers/counter.go @@ -58,6 +58,11 @@ func (c *RefCounter) incdec(path string, infoOp func(minfo *minfo)) int { } infoOp(m) count := m.count + if count <= 0 { + // If the mounted path has been decremented enough have no references, + // then its entry can be removed. 
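// Deleting the entry once its count drops to zero keeps the RefCounter map
// from accumulating one record per mount point ever seen. The same pattern
// on a bare map, for illustration:
//
//	counts[path]--
//	if counts[path] <= 0 {
//		delete(counts, path)
//	}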
+ delete(c.counts, path) + } c.mu.Unlock() return count } diff --git a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go index c23097b76..96c4cdacb 100644 --- a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go +++ b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go @@ -1,3 +1,4 @@ +//go:build linux && cgo // +build linux,cgo package devmapper @@ -5,14 +6,13 @@ package devmapper import ( "bufio" "bytes" + "errors" "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" "strings" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -59,7 +59,7 @@ func checkDevAvailable(dev string) error { } if !bytes.Contains(out, []byte(dev)) { - return errors.Errorf("%s is not available for use with devicemapper", dev) + return fmt.Errorf("%s is not available for use with devicemapper", dev) } return nil } @@ -84,7 +84,7 @@ func checkDevInVG(dev string) error { // got "VG Name" line" vg := strings.TrimSpace(fields[1]) if len(vg) > 0 { - return errors.Errorf("%s is already part of a volume group %q: must remove this device from any volume group or provide a different device", dev, vg) + return fmt.Errorf("%s is already part of a volume group %q: must remove this device from any volume group or provide a different device", dev, vg) } logrus.Error(fields) break @@ -112,7 +112,7 @@ func checkDevHasFS(dev string) error { if bytes.Equal(kv[0], []byte("TYPE")) { v := bytes.Trim(kv[1], "\"") if len(v) > 0 { - return errors.Errorf("%s has a filesystem already, use dm.directlvm_device_force=true if you want to wipe the device", dev) + return fmt.Errorf("%s has a filesystem already, use dm.directlvm_device_force=true if you want to wipe the device", dev) } return nil } @@ -123,16 +123,16 @@ func checkDevHasFS(dev string) error { func verifyBlockDevice(dev string, force bool) error { absPath, err := filepath.Abs(dev) if err != nil { - return errors.Errorf("unable to get absolute path for %s: %s", dev, err) + return fmt.Errorf("unable to get absolute path for %s: %s", dev, err) } realPath, err := filepath.EvalSymlinks(absPath) if err != nil { - return errors.Errorf("failed to canonicalise path for %s: %s", dev, err) + return fmt.Errorf("failed to canonicalise path for %s: %s", dev, err) } if err := checkDevAvailable(absPath); err != nil { logrus.Infof("block device '%s' not available, checking '%s'", absPath, realPath) if err := checkDevAvailable(realPath); err != nil { - return errors.Errorf("neither '%s' nor '%s' are in the output of lvmdiskscan, can't use device.", absPath, realPath) + return fmt.Errorf("neither '%s' nor '%s' are in the output of lvmdiskscan, can't use device", absPath, realPath) } } if err := checkDevInVG(realPath); err != nil { @@ -153,31 +153,34 @@ func readLVMConfig(root string) (directLVMConfig, error) { var cfg directLVMConfig p := filepath.Join(root, "setup-config.json") - b, err := ioutil.ReadFile(p) + b, err := os.ReadFile(p) if err != nil { if os.IsNotExist(err) { return cfg, nil } - return cfg, errors.Wrap(err, "error reading existing setup config") + return cfg, fmt.Errorf("reading existing setup config: %w", err) } // check if this is just an empty file, no need to produce a json error later if so if len(b) == 0 { return cfg, nil } - - err = json.Unmarshal(b, &cfg) - return cfg, errors.Wrap(err, "error unmarshaling previous device setup config") + if err := json.Unmarshal(b, &cfg); err != nil { + return cfg, fmt.Errorf("unmarshaling previous device 
setup config: %w", err) + } + return cfg, nil } func writeLVMConfig(root string, cfg directLVMConfig) error { p := filepath.Join(root, "setup-config.json") b, err := json.Marshal(cfg) if err != nil { - return errors.Wrap(err, "error marshalling direct lvm config") + return fmt.Errorf("marshalling direct lvm config: %w", err) } - err = ioutil.WriteFile(p, b, 0600) - return errors.Wrap(err, "error writing direct lvm config to file") + if err := os.WriteFile(p, b, 0600); err != nil { + return fmt.Errorf("writing direct lvm config to file: %w", err) + } + return nil } func setupDirectLVM(cfg directLVMConfig) error { @@ -186,13 +189,13 @@ func setupDirectLVM(cfg directLVMConfig) error { for _, bin := range binaries { if _, err := exec.LookPath(bin); err != nil { - return errors.Wrap(err, "error looking up command `"+bin+"` while setting up direct lvm") + return fmt.Errorf("looking up command `"+bin+"` while setting up direct lvm: %w", err) } } err := os.MkdirAll(lvmProfileDir, 0755) if err != nil { - return errors.Wrap(err, "error creating lvm profile directory") + return fmt.Errorf("creating lvm profile directory: %w", err) } if cfg.AutoExtendPercent == 0 { @@ -215,34 +218,37 @@ func setupDirectLVM(cfg directLVMConfig) error { out, err := exec.Command("pvcreate", "--metadatasize", cfg.MetaDataSize, "-f", cfg.Device).CombinedOutput() if err != nil { - return errors.Wrap(err, string(out)) + return fmt.Errorf("%v: %w", string(out), err) } out, err = exec.Command("vgcreate", "storage", cfg.Device).CombinedOutput() if err != nil { - return errors.Wrap(err, string(out)) + return fmt.Errorf("%v: %w", string(out), err) } out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpool", "storage", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpPercent)).CombinedOutput() if err != nil { - return errors.Wrap(err, string(out)) + return fmt.Errorf("%v: %w", string(out), err) } out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpoolmeta", "storage", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpMetaPercent)).CombinedOutput() if err != nil { - return errors.Wrap(err, string(out)) + return fmt.Errorf("%v: %w", string(out), err) } out, err = exec.Command("lvconvert", "-y", "--zero", "n", "-c", "512K", "--thinpool", "storage/thinpool", "--poolmetadata", "storage/thinpoolmeta").CombinedOutput() if err != nil { - return errors.Wrap(err, string(out)) + return fmt.Errorf("%v: %w", string(out), err) } profile := fmt.Sprintf("activation{\nthin_pool_autoextend_threshold=%d\nthin_pool_autoextend_percent=%d\n}", cfg.AutoExtendThreshold, cfg.AutoExtendPercent) - err = ioutil.WriteFile(lvmProfileDir+"/storage-thinpool.profile", []byte(profile), 0600) + err = os.WriteFile(lvmProfileDir+"/storage-thinpool.profile", []byte(profile), 0600) if err != nil { - return errors.Wrap(err, "error writing storage thinp autoextend profile") + return fmt.Errorf("writing storage thinp autoextend profile: %w", err) } out, err = exec.Command("lvchange", "--metadataprofile", "storage-thinpool", "storage/thinpool").CombinedOutput() - return errors.Wrap(err, string(out)) + if err != nil { + return fmt.Errorf("%s: %w", string(out), err) + } + return nil } diff --git a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go index 19fb3fda9..697a16fda 100644 --- a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go +++ b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go @@ -1,12 +1,14 @@ +//go:build 
linux && cgo // +build linux,cgo package devmapper import ( "bufio" + "errors" "fmt" "io" - "io/ioutil" + "io/fs" "os" "os/exec" "path" @@ -27,7 +29,6 @@ import ( "github.com/containers/storage/pkg/parsers/kernel" units "github.com/docker/go-units" "github.com/opencontainers/selinux/go-selinux/label" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -298,7 +299,7 @@ func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { } defer file.Close() if err := file.Truncate(size); err != nil { - return "", fmt.Errorf("devmapper: Unable to grow loopback file %s: %v", filename, err) + return "", fmt.Errorf("devmapper: Unable to grow loopback file %s: %w", filename, err) } } else if fi.Size() > size { logrus.Warnf("devmapper: Can't shrink loopback file %s", filename) @@ -329,7 +330,7 @@ func (devices *DeviceSet) removeMetadata(info *devInfo) error { // Given json data and file path, write it to disk func (devices *DeviceSet) writeMetaFile(jsonData []byte, filePath string) error { - tmpFile, err := ioutil.TempFile(devices.metadataDir(), ".tmp") + tmpFile, err := os.CreateTemp(devices.metadataDir(), ".tmp") if err != nil { return fmt.Errorf("devmapper: Error creating metadata file: %s", err) } @@ -419,40 +420,35 @@ func (devices *DeviceSet) constructDeviceIDMap() { } } -func (devices *DeviceSet) deviceFileWalkFunction(path string, finfo os.FileInfo) error { +func (devices *DeviceSet) deviceFileWalkFunction(path string, name string) error { // Skip some of the meta files which are not device files. - if strings.HasSuffix(finfo.Name(), ".migrated") { + if strings.HasSuffix(name, ".migrated") { logrus.Debugf("devmapper: Skipping file %s", path) return nil } - if strings.HasPrefix(finfo.Name(), ".") { + if strings.HasPrefix(name, ".") { logrus.Debugf("devmapper: Skipping file %s", path) return nil } - if finfo.Name() == deviceSetMetaFile { + if name == deviceSetMetaFile { logrus.Debugf("devmapper: Skipping file %s", path) return nil } - if finfo.Name() == transactionMetaFile { + if name == transactionMetaFile { logrus.Debugf("devmapper: Skipping file %s", path) return nil } logrus.Debugf("devmapper: Loading data for file %s", path) - hash := finfo.Name() - if hash == base { - hash = "" - } - // Include deleted devices also as cleanup delete device logic // will go through it and see if there are any deleted devices. - if _, err := devices.lookupDevice(hash); err != nil { - return fmt.Errorf("devmapper: Error looking up device %s:%v", hash, err) + if _, err := devices.lookupDevice(name); err != nil { + return fmt.Errorf("devmapper: Error looking up device %s:%w", name, err) } return nil @@ -462,21 +458,21 @@ func (devices *DeviceSet) loadDeviceFilesOnStart() error { logrus.Debug("devmapper: loadDeviceFilesOnStart()") defer logrus.Debug("devmapper: loadDeviceFilesOnStart() END") - var scan = func(path string, info os.FileInfo, err error) error { + var scan = func(path string, d fs.DirEntry, err error) error { if err != nil { logrus.Debugf("devmapper: Can't walk the file %s", path) return nil } // Skip any directories - if info.IsDir() { + if d.IsDir() { return nil } - return devices.deviceFileWalkFunction(path, info) + return devices.deviceFileWalkFunction(path, d.Name()) } - return filepath.Walk(devices.metadataDir(), scan) + return filepath.WalkDir(devices.metadataDir(), scan) } // Should be called with devices.Lock() held. 
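
The hunk above moves loadDeviceFilesOnStart from filepath.Walk to filepath.WalkDir (Go 1.16+), which hands the callback a lightweight fs.DirEntry instead of a pre-stat()ed os.FileInfo, so the walk no longer lstats every entry up front. A minimal, self-contained sketch of the same pattern; the directory path and print statement are illustrative only, not part of this change:

package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
)

func main() {
	// WalkDir avoids calling os.Lstat on every entry; d.Info() is
	// still available when full file metadata is actually needed.
	err := filepath.WalkDir("/var/lib/example", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return nil // tolerate unreadable entries, as the driver does
		}
		if d.IsDir() {
			return nil // skip directories, only files are of interest here
		}
		fmt.Println(path, d.Name())
		return nil
	})
	if err != nil {
		fmt.Println("walk failed:", err)
	}
}
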
@@ -551,7 +547,7 @@ func xfsSupported() error { f, err := os.Open("/proc/filesystems") if err != nil { - return errors.Wrapf(err, "error checking for xfs support") + return fmt.Errorf("checking for xfs support: %w", err) } defer f.Close() @@ -563,7 +559,7 @@ func xfsSupported() error { } if err := s.Err(); err != nil { - return errors.Wrapf(err, "error checking for xfs support") + return fmt.Errorf("checking for xfs support: %w", err) } return errors.New(`kernel does not support xfs, or "modprobe xfs" failed`) @@ -633,7 +629,7 @@ func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) { func (devices *DeviceSet) migrateOldMetaData() error { // Migrate old metadata file - jsonData, err := ioutil.ReadFile(devices.oldMetadataFile()) + jsonData, err := os.ReadFile(devices.oldMetadataFile()) if err != nil && !os.IsNotExist(err) { return err } @@ -734,7 +730,7 @@ func (devices *DeviceSet) initMetaData() error { devices.TransactionID = transactionID if err := devices.loadDeviceFilesOnStart(); err != nil { - return fmt.Errorf("devmapper: Failed to load device files:%v", err) + return fmt.Errorf("devmapper: Failed to load device files:%w", err) } devices.constructDeviceIDMap() @@ -873,7 +869,7 @@ func (devices *DeviceSet) takeSnapshot(hash string, baseInfo *devInfo, size uint err = devices.cancelDeferredRemoval(baseInfo) if err != nil { // If Error is ErrEnxio. Device is probably already gone. Continue. - if errors.Cause(err) != devicemapper.ErrEnxio { + if !errors.Is(err, devicemapper.ErrEnxio) { return err } devinfo = nil @@ -958,7 +954,7 @@ func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInf func (devices *DeviceSet) loadMetadata(hash string) *devInfo { info := &devInfo{Hash: hash, devices: devices} - jsonData, err := ioutil.ReadFile(devices.metadataFile(info)) + jsonData, err := os.ReadFile(devices.metadataFile(info)) if err != nil { logrus.Debugf("devmapper: Failed to read %s with err: %v", devices.metadataFile(info), err) return nil @@ -980,7 +976,7 @@ func (devices *DeviceSet) loadMetadata(hash string) *devInfo { func getDeviceUUID(device string) (string, error) { out, err := exec.Command("blkid", "-s", "UUID", "-o", "value", device).Output() if err != nil { - return "", fmt.Errorf("devmapper: Failed to find uuid for device %s:%v", device, err) + return "", fmt.Errorf("devmapper: Failed to find uuid for device %s:%w", device, err) } uuid := strings.TrimSuffix(string(out), "\n") @@ -1088,7 +1084,7 @@ func (devices *DeviceSet) createBaseImage() error { } if err := devices.saveBaseDeviceUUID(info); err != nil { - return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err) + return fmt.Errorf("devmapper: Could not query and save base device UUID:%w", err) } return nil @@ -1101,7 +1097,7 @@ func (devices *DeviceSet) thinPoolExists(thinPoolDevice string) (bool, error) { info, err := devicemapper.GetInfo(thinPoolDevice) if err != nil { - return false, fmt.Errorf("devmapper: GetInfo() on device %s failed: %v", thinPoolDevice, err) + return false, fmt.Errorf("devmapper: GetInfo() on device %s failed: %w", thinPoolDevice, err) } // Device does not exist. 
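
Most hunks in this file follow a single migration: github.com/pkg/errors is dropped in favor of the standard library, so errors.Wrapf becomes fmt.Errorf with the %w verb, and comparisons against errors.Cause(err) become errors.Is, which matches a sentinel anywhere in a wrapped chain. A minimal sketch of the before/after shape; errGone and deactivate are hypothetical stand-ins, not symbols from this package:

package main

import (
	"errors"
	"fmt"
)

var errGone = errors.New("device gone") // stand-in sentinel error

func deactivate(name string) error {
	// before: return errors.Wrapf(errGone, "deactivating %q", name)
	return fmt.Errorf("deactivating %q: %w", name, errGone)
}

func main() {
	err := deactivate("thinpool")
	// before: if errors.Cause(err) == errGone { ... }
	// errors.Is unwraps %w-wrapped errors, so the sentinel still matches.
	if errors.Is(err, errGone) {
		fmt.Println("already removed, continuing:", err)
	}
}
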
@@ -1111,7 +1107,7 @@ func (devices *DeviceSet) thinPoolExists(thinPoolDevice string) (bool, error) { _, _, deviceType, _, err := devicemapper.GetStatus(thinPoolDevice) if err != nil { - return false, fmt.Errorf("devmapper: GetStatus() on device %s failed: %v", thinPoolDevice, err) + return false, fmt.Errorf("devmapper: GetStatus() on device %s failed: %w", thinPoolDevice, err) } if deviceType != "thin-pool" { @@ -1143,13 +1139,13 @@ func (devices *DeviceSet) setupVerifyBaseImageUUIDFS(baseInfo *devInfo) error { // If BaseDeviceUUID is nil (upgrade case), save it and return success. if devices.BaseDeviceUUID == "" { if err := devices.saveBaseDeviceUUID(baseInfo); err != nil { - return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err) + return fmt.Errorf("devmapper: Could not query and save base device UUID:%w", err) } return nil } if err := devices.verifyBaseDeviceUUIDFS(baseInfo); err != nil { - return fmt.Errorf("devmapper: Base Device UUID and Filesystem verification failed: %v", err) + return fmt.Errorf("devmapper: Base Device UUID and Filesystem verification failed: %w", err) } return nil @@ -1188,7 +1184,7 @@ func (devices *DeviceSet) checkGrowBaseDeviceFS(info *devInfo) error { func (devices *DeviceSet) growFS(info *devInfo) error { if err := devices.activateDeviceIfNeeded(info, false); err != nil { - return fmt.Errorf("Error activating devmapper device: %s", err) + return fmt.Errorf("activating devmapper device: %s", err) } defer devices.deactivateDevice(info) @@ -1209,7 +1205,7 @@ func (devices *DeviceSet) growFS(info *devInfo) error { options = joinMountOptions(options, devices.mountOptions) if err := mount.Mount(info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options); err != nil { - return errors.Wrapf(err, "Failed to mount; dmesg: %s", string(dmesg.Dmesg(256))) + return fmt.Errorf("failed to mount; dmesg: %s: %w", string(dmesg.Dmesg(256)), err) } defer func() { @@ -1221,14 +1217,14 @@ func (devices *DeviceSet) growFS(info *devInfo) error { switch devices.BaseDeviceFilesystem { case ext4: if out, err := exec.Command("resize2fs", info.DevName()).CombinedOutput(); err != nil { - return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out)) + return fmt.Errorf("failed to grow rootfs:%s:%w", string(out), err) } case xfs: if out, err := exec.Command("xfs_growfs", info.DevName()).CombinedOutput(); err != nil { - return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out)) + return fmt.Errorf("failed to grow rootfs:%s:%w", string(out), err) } default: - return fmt.Errorf("Unsupported filesystem type %s", devices.BaseDeviceFilesystem) + return fmt.Errorf("unsupported filesystem type %s", devices.BaseDeviceFilesystem) } return nil } @@ -1279,11 +1275,11 @@ func (devices *DeviceSet) setupBaseImage() error { } func setCloseOnExec(name string) { - fileInfos, _ := ioutil.ReadDir("/proc/self/fd") - for _, i := range fileInfos { - link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name())) + fileEntries, _ := os.ReadDir("/proc/self/fd") + for _, e := range fileEntries { + link, _ := os.Readlink(filepath.Join("/proc/self/fd", e.Name())) if link == name { - fd, err := strconv.Atoi(i.Name()) + fd, err := strconv.Atoi(e.Name()) if err == nil { unix.CloseOnExec(fd) } @@ -1351,7 +1347,7 @@ func (devices *DeviceSet) ResizePool(size int64) error { // Reload size for loopback device if err := loopback.SetCapacity(dataloopback); err != nil { - return fmt.Errorf("Unable to update loopback capacity: %s", err) + return fmt.Errorf("unable to update 
loopback capacity: %s", err) } // Suspend the pool @@ -1373,7 +1369,7 @@ func (devices *DeviceSet) ResizePool(size int64) error { } func (devices *DeviceSet) loadTransactionMetaData() error { - jsonData, err := ioutil.ReadFile(devices.transactionMetaFile()) + jsonData, err := os.ReadFile(devices.transactionMetaFile()) if err != nil { // There is no active transaction. This will be the case // during upgrade. @@ -1454,7 +1450,7 @@ func (devices *DeviceSet) processPendingTransaction() error { } func (devices *DeviceSet) loadDeviceSetMetaData() error { - jsonData, err := ioutil.ReadFile(devices.deviceSetMetaFile()) + jsonData, err := os.ReadFile(devices.deviceSetMetaFile()) if err != nil { // For backward compatibility return success if file does // not exist. @@ -1510,7 +1506,7 @@ func determineDriverCapabilities(version string) error { versionSplit := strings.Split(version, ".") major, err := strconv.Atoi(versionSplit[0]) if err != nil { - return errors.Wrapf(graphdriver.ErrNotSupported, "unable to parse driver major version %q as a number", versionSplit[0]) + return fmt.Errorf("unable to parse driver major version %q as a number: %w", versionSplit[0], graphdriver.ErrNotSupported) } if major > 4 { @@ -1524,7 +1520,7 @@ func determineDriverCapabilities(version string) error { minor, err := strconv.Atoi(versionSplit[1]) if err != nil { - return errors.Wrapf(graphdriver.ErrNotSupported, "unable to parse driver minor version %q as a number", versionSplit[1]) + return fmt.Errorf("unable to parse driver minor version %q as a number: %w", versionSplit[1], graphdriver.ErrNotSupported) } /* @@ -1786,7 +1782,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) { } switch fsMagic { case graphdriver.FsMagicAufs: - return errors.Errorf("devmapper: Loopback devices can not be created on AUFS filesystems") + return fmt.Errorf("devmapper: Loopback devices can not be created on AUFS filesystems") } if devices.dataDevice == "" { @@ -1982,7 +1978,7 @@ func (devices *DeviceSet) parseStorageOpt(storageOpt map[string]string) (uint64, } return uint64(size), nil default: - return 0, fmt.Errorf("Unknown option %s", key) + return 0, fmt.Errorf("unknown option %s", key) } } @@ -2016,7 +2012,7 @@ func (devices *DeviceSet) deleteDeviceNoLock(info *devInfo, syncDelete bool) err // If syncDelete is true, we want to return error. If deferred // deletion is not enabled, we return an error. If error is // something other then EBUSY, return an error. - if syncDelete || !devices.deferredDelete || errors.Cause(err) != devicemapper.ErrBusy { + if syncDelete || !devices.deferredDelete || !errors.Is(err, devicemapper.ErrBusy) { logrus.Debugf("devmapper: Error deleting device: %s", err) return err } @@ -2177,7 +2173,7 @@ func (devices *DeviceSet) deactivateDeviceMode(info *devInfo, deferredRemove boo // This function's semantics is such that it does not return an // error if device does not exist. So if device went away by // the time we actually tried to remove it, do not return error. 
- if errors.Cause(err) != devicemapper.ErrEnxio { + if !errors.Is(err, devicemapper.ErrEnxio) { return err } return nil @@ -2195,7 +2191,7 @@ func (devices *DeviceSet) removeDevice(devname string) error { if err == nil { break } - if errors.Cause(err) != devicemapper.ErrBusy { + if !errors.Is(err, devicemapper.ErrBusy) { return err } @@ -2229,7 +2225,7 @@ func (devices *DeviceSet) cancelDeferredRemovalIfNeeded(info *devInfo) error { // Cancel deferred remove if err := devices.cancelDeferredRemoval(info); err != nil { // If Error is ErrEnxio. Device is probably already gone. Continue. - if errors.Cause(err) != devicemapper.ErrBusy { + if !errors.Is(err, devicemapper.ErrEnxio) { return err } } @@ -2246,7 +2242,7 @@ func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error { for i := 0; i < 100; i++ { err = devicemapper.CancelDeferredRemove(info.Name()) if err != nil { - if errors.Cause(err) != devicemapper.ErrBusy { + if !errors.Is(err, devicemapper.ErrBusy) { // If we see EBUSY it may be a transient error, // sleep a bit and retry a few times. devices.Unlock() @@ -2261,7 +2257,7 @@ func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error { } func (devices *DeviceSet) unmountAndDeactivateAll(dir string) { - files, err := ioutil.ReadDir(dir) + files, err := os.ReadDir(dir) if err != nil { logrus.Warnf("devmapper: unmountAndDeactivate: %s", err) return @@ -2347,21 +2343,21 @@ func (devices *DeviceSet) Shutdown(home string) error { func (devices *DeviceSet) xfsSetNospaceRetries(info *devInfo) error { dmDevicePath, err := os.Readlink(info.DevName()) if err != nil { - return fmt.Errorf("devmapper: readlink failed for device %v:%v", info.DevName(), err) + return fmt.Errorf("devmapper: readlink failed for device %v:%w", info.DevName(), err) } dmDeviceName := path.Base(dmDevicePath) filePath := "/sys/fs/xfs/" + dmDeviceName + "/error/metadata/ENOSPC/max_retries" maxRetriesFile, err := os.OpenFile(filePath, os.O_WRONLY, 0) if err != nil { - return fmt.Errorf("devmapper: user specified daemon option dm.xfs_nospace_max_retries but it does not seem to be supported on this system :%v", err) + return fmt.Errorf("devmapper: user specified daemon option dm.xfs_nospace_max_retries but it does not seem to be supported on this system :%w", err) } defer maxRetriesFile.Close() // Set max retries to 0 _, err = maxRetriesFile.WriteString(devices.xfsNospaceRetries) if err != nil { - return fmt.Errorf("devmapper: Failed to write string %v to file %v:%v", devices.xfsNospaceRetries, filePath, err) + return fmt.Errorf("devmapper: Failed to write string %v to file %v:%w", devices.xfsNospaceRetries, filePath, err) } return nil } @@ -2412,7 +2408,7 @@ func (devices *DeviceSet) MountDevice(hash, path string, moptions graphdriver.Mo options = joinMountOptions(options, label.FormatMountLabel("", moptions.MountLabel)) if err := mount.Mount(info.DevName(), path, fstype, options); err != nil { - return errors.Wrapf(err, "Failed to mount; dmesg: %s", string(dmesg.Dmesg(256))) + return fmt.Errorf("failed to mount; dmesg: %s: %w", string(dmesg.Dmesg(256)), err) } if fstype == xfs && devices.xfsNospaceRetries != "" { @@ -2793,7 +2789,7 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [ case "dm.thinp_percent": per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) if err != nil { - return nil, errors.Wrapf(err, "could not parse `dm.thinp_percent=%s`", val) + return nil, fmt.Errorf("could not parse `dm.thinp_percent=%s`: %w", val, err) } if per >= 100 { return nil,
errors.New("dm.thinp_percent must be greater than 0 and less than 100") @@ -2802,7 +2798,7 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [ case "dm.thinp_metapercent": per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) if err != nil { - return nil, errors.Wrapf(err, "could not parse `dm.thinp_metapercent=%s`", val) + return nil, fmt.Errorf("could not parse `dm.thinp_metapercent=%s`: %w", val, err) } if per >= 100 { return nil, errors.New("dm.thinp_metapercent must be greater than 0 and less than 100") @@ -2811,7 +2807,7 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [ case "dm.thinp_autoextend_percent": per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) if err != nil { - return nil, errors.Wrapf(err, "could not parse `dm.thinp_autoextend_percent=%s`", val) + return nil, fmt.Errorf("could not parse `dm.thinp_autoextend_percent=%s`: %w", val, err) } if per > 100 { return nil, errors.New("dm.thinp_autoextend_percent must be greater than 0 and less than 100") @@ -2820,7 +2816,7 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [ case "dm.thinp_autoextend_threshold": per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) if err != nil { - return nil, errors.Wrapf(err, "could not parse `dm.thinp_autoextend_threshold=%s`", val) + return nil, fmt.Errorf("could not parse `dm.thinp_autoextend_threshold=%s`: %w", val, err) } if per > 100 { return nil, errors.New("dm.thinp_autoextend_threshold must be greater than 0 and less than 100") @@ -2829,10 +2825,10 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [ case "dm.libdm_log_level": level, err := strconv.ParseInt(val, 10, 32) if err != nil { - return nil, errors.Wrapf(err, "could not parse `dm.libdm_log_level=%s`", val) + return nil, fmt.Errorf("could not parse `dm.libdm_log_level=%s`: %w", val, err) } if level < devicemapper.LogLevelFatal || level > devicemapper.LogLevelDebug { - return nil, errors.Errorf("dm.libdm_log_level must be in range [%d,%d]", devicemapper.LogLevelFatal, devicemapper.LogLevelDebug) + return nil, fmt.Errorf("dm.libdm_log_level must be in range [%d,%d]", devicemapper.LogLevelFatal, devicemapper.LogLevelDebug) } // Register a new logging callback with the specified level. 
devicemapper.LogInit(devicemapper.DefaultLogger{ diff --git a/vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go b/vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go index 418b9e610..f85fb9479 100644 --- a/vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go +++ b/vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go @@ -1,3 +1,4 @@ +//go:build linux && cgo // +build linux,cgo package devmapper diff --git a/vendor/github.com/containers/storage/drivers/devmapper/driver.go b/vendor/github.com/containers/storage/drivers/devmapper/driver.go index d2f165e26..8b3ee51df 100644 --- a/vendor/github.com/containers/storage/drivers/devmapper/driver.go +++ b/vendor/github.com/containers/storage/drivers/devmapper/driver.go @@ -1,10 +1,10 @@ +//go:build linux && cgo // +build linux,cgo package devmapper import ( "fmt" - "io/ioutil" "os" "path" "strconv" @@ -23,7 +23,7 @@ import ( const defaultPerms = os.FileMode(0555) func init() { - graphdriver.Register("devicemapper", Init) + graphdriver.MustRegister("devicemapper", Init) } // Driver contains the device set mounted and the home directory @@ -227,7 +227,7 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) { // Create an "id" file with the container/image id in it to help reconstruct this in case // of later problems - if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil { + if err := os.WriteFile(idFile, []byte(id), 0600); err != nil { d.ctr.Decrement(mp) d.DeviceSet.UnmountDevice(id, mp) return "", err @@ -267,6 +267,11 @@ func (d *Driver) Exists(id string) bool { return d.DeviceSet.HasDevice(id) } +// List layers (not including additional image stores) +func (d *Driver) ListLayers() ([]string, error) { + return nil, graphdriver.ErrNotSupported +} + // AdditionalImageStores returns additional image stores supported by the driver func (d *Driver) AdditionalImageStores() []string { return nil diff --git a/vendor/github.com/containers/storage/drivers/devmapper/jsoniter.go b/vendor/github.com/containers/storage/drivers/devmapper/jsoniter.go index 54db6ab4a..52f0e863e 100644 --- a/vendor/github.com/containers/storage/drivers/devmapper/jsoniter.go +++ b/vendor/github.com/containers/storage/drivers/devmapper/jsoniter.go @@ -1,3 +1,6 @@ +//go:build linux && cgo +// +build linux,cgo + package devmapper import jsoniter "github.com/json-iterator/go" diff --git a/vendor/github.com/containers/storage/drivers/devmapper/mount.go b/vendor/github.com/containers/storage/drivers/devmapper/mount.go index 41e73faf5..db903ca55 100644 --- a/vendor/github.com/containers/storage/drivers/devmapper/mount.go +++ b/vendor/github.com/containers/storage/drivers/devmapper/mount.go @@ -1,3 +1,4 @@ +//go:build linux && cgo // +build linux,cgo package devmapper diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go index 770b431bd..b3b0614fd 100644 --- a/vendor/github.com/containers/storage/drivers/driver.go +++ b/vendor/github.com/containers/storage/drivers/driver.go @@ -1,20 +1,19 @@ package graphdriver import ( + "errors" "fmt" "io" "os" "path/filepath" "strings" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/vbatts/tar-split/tar/storage" - "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/directory" "github.com/containers/storage/pkg/idtools" digest 
"github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" + "github.com/vbatts/tar-split/tar/storage" ) // FsMagic unsigned id of the filesystem in use. @@ -39,7 +38,7 @@ var ( ErrLayerUnknown = errors.New("unknown layer") ) -//CreateOpts contains optional arguments for Create() and CreateReadWrite() +// CreateOpts contains optional arguments for Create() and CreateReadWrite() // methods. type CreateOpts struct { MountLabel string @@ -48,13 +47,13 @@ type CreateOpts struct { ignoreChownErrors bool } -// MountOpts contains optional arguments for LayerStope.Mount() methods. +// MountOpts contains optional arguments for Driver.Get() methods. type MountOpts struct { // Mount label is the MAC Labels to assign to mount point (SELINUX) MountLabel string // UidMaps & GidMaps are the User Namespace mappings to be assigned to content in the mount point - UidMaps []idtools.IDMap // nolint: golint - GidMaps []idtools.IDMap // nolint: golint + UidMaps []idtools.IDMap //nolint: golint,revive + GidMaps []idtools.IDMap //nolint: golint Options []string // Volatile specifies whether the container storage can be optimized @@ -110,6 +109,9 @@ type ProtoDriver interface { // Exists returns whether a filesystem layer with the specified // ID exists on this driver. Exists(id string) bool + // Returns a list of layer ids that exist on this driver (does not include + // additional storage layers). Not supported by all backends. + ListLayers() ([]string, error) // Status returns a set of key-value pairs which give low // level diagnostic status about this driver. Status() [][2]string @@ -279,10 +281,18 @@ func init() { drivers = make(map[string]InitFunc) } +// MustRegister registers an InitFunc for the driver, or panics. +// It is suitable for package’s init() sections. +func MustRegister(name string, initFunc InitFunc) { + if err := Register(name, initFunc); err != nil { + panic(fmt.Sprintf("failed to register containers/storage graph driver %q: %v", name, err)) + } +} + // Register registers an InitFunc for the driver. 
func Register(name string, initFunc InitFunc) error { if _, exists := drivers[name]; exists { - return fmt.Errorf("Name already registered %s", name) + return fmt.Errorf("name already registered %s", name) } drivers[name] = initFunc @@ -296,7 +306,7 @@ func GetDriver(name string, config Options) (Driver, error) { } logrus.Errorf("Failed to GetDriver graph %s %s", name, config.Root) - return nil, errors.Wrapf(ErrNotSupported, "failed to GetDriver graph %s %s", name, config.Root) + return nil, fmt.Errorf("failed to GetDriver graph %s %s: %w", name, config.Root, ErrNotSupported) } // getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins @@ -305,13 +315,14 @@ func getBuiltinDriver(name, home string, options Options) (Driver, error) { return initFunc(filepath.Join(home, name), options) } logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home) - return nil, errors.Wrapf(ErrNotSupported, "failed to built-in GetDriver graph %s %s", name, home) + return nil, fmt.Errorf("failed to built-in GetDriver graph %s %s: %w", name, home, ErrNotSupported) } // Options is used to initialize a graphdriver type Options struct { Root string RunRoot string + DriverPriority []string DriverOptions []string UIDMaps []idtools.IDMap GIDMaps []idtools.IDMap @@ -327,9 +338,18 @@ func New(name string, config Options) (Driver, error) { // Guess for prior driver driversMap := scanPriorDrivers(config.Root) - for _, name := range priority { - if name == "vfs" { - // don't use vfs even if there is state present. + + // use the supplied priority list unless it is empty + prioList := config.DriverPriority + if len(prioList) == 0 { + prioList = priority + } + + for _, name := range prioList { + if name == "vfs" && len(config.DriverPriority) == 0 { + // don't use vfs even if there is state present and vfs + // has not been explicitly added to the override driver + // priority list continue } if _, prior := driversMap[name]; prior { @@ -362,7 +382,7 @@ func New(name string, config Options) (Driver, error) { } // Check for priority drivers first - for _, name := range priority { + for _, name := range prioList { driver, err := getBuiltinDriver(name, config.Root, config) if err != nil { if isDriverNotSupported(err) { @@ -384,14 +404,13 @@ func New(name string, config Options) (Driver, error) { } return driver, nil } - return nil, fmt.Errorf("No supported storage backend found") + return nil, fmt.Errorf("no supported storage backend found") } // isDriverNotSupported returns true if the error initializing // the graph driver is a non-supported error. func isDriverNotSupported(err error) bool { - cause := errors.Cause(err) - return cause == ErrNotSupported || cause == ErrPrerequisites || cause == ErrIncompatibleFS + return errors.Is(err, ErrNotSupported) || errors.Is(err, ErrPrerequisites) || errors.Is(err, ErrIncompatibleFS) } // scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers @@ -406,3 +425,21 @@ func scanPriorDrivers(root string) map[string]bool { } return driversMap } + +// driverPut is driver.Put, but errors are handled either by updating mainErr or just logging. 
+// Typical usage: +// +// func …(…) (err error) { +// … +// defer driverPut(driver, id, &err) +// } +func driverPut(driver ProtoDriver, id string, mainErr *error) { + if err := driver.Put(id); err != nil { + err = fmt.Errorf("unmounting layer %s: %w", id, err) + if *mainErr == nil { + *mainErr = err + } else { + logrus.Errorf(err.Error()) + } + } +} diff --git a/vendor/github.com/containers/storage/drivers/driver_darwin.go b/vendor/github.com/containers/storage/drivers/driver_darwin.go new file mode 100644 index 000000000..357851543 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/driver_darwin.go @@ -0,0 +1,14 @@ +package graphdriver + +var ( + // Slice of drivers that should be used in order + priority = []string{ + "vfs", + } +) + +// GetFSMagic returns the filesystem id given the path. +func GetFSMagic(rootpath string) (FsMagic, error) { + // Note it is OK to return FsMagicUnsupported on Windows. + return FsMagicUnsupported, nil +} diff --git a/vendor/github.com/containers/storage/drivers/driver_freebsd.go b/vendor/github.com/containers/storage/drivers/driver_freebsd.go index e1320ee07..143cccf92 100644 --- a/vendor/github.com/containers/storage/drivers/driver_freebsd.go +++ b/vendor/github.com/containers/storage/drivers/driver_freebsd.go @@ -2,15 +2,43 @@ package graphdriver import ( "golang.org/x/sys/unix" + + "github.com/containers/storage/pkg/mount" +) + +const ( + // FsMagicZfs filesystem id for Zfs + FsMagicZfs = FsMagic(0x2fc12fc1) ) var ( // Slice of drivers that should be used in an order priority = []string{ "zfs", + "vfs", + } + + // FsNames maps filesystem id to name of the filesystem. + FsNames = map[FsMagic]string{ + FsMagicZfs: "zfs", } ) +// NewDefaultChecker returns a check that parses /proc/mountinfo to check +// if the specified path is mounted. +// No-op on FreeBSD. 
+func NewDefaultChecker() Checker { + return &defaultChecker{} +} + +type defaultChecker struct { +} + +func (c *defaultChecker) IsMounted(path string) bool { + m, _ := mount.Mounted(path) + return m +} + // Mounted checks if the given path is mounted as the fs type func Mounted(fsType FsMagic, mountPath string) (bool, error) { var buf unix.Statfs_t diff --git a/vendor/github.com/containers/storage/drivers/driver_linux.go b/vendor/github.com/containers/storage/drivers/driver_linux.go index dddf8a8b4..b9e57a60d 100644 --- a/vendor/github.com/containers/storage/drivers/driver_linux.go +++ b/vendor/github.com/containers/storage/drivers/driver_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package graphdriver @@ -6,6 +7,7 @@ import ( "path/filepath" "github.com/containers/storage/pkg/mount" + "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -50,6 +52,40 @@ const ( FsMagicOverlay = FsMagic(0x794C7630) // FsMagicFUSE filesystem id for FUSE FsMagicFUSE = FsMagic(0x65735546) + // FsMagicAcfs filesystem id for Acfs + FsMagicAcfs = FsMagic(0x61636673) + // FsMagicAfs filesystem id for Afs + FsMagicAfs = FsMagic(0x5346414f) + // FsMagicCephFs filesystem id for Ceph + FsMagicCephFs = FsMagic(0x00C36400) + // FsMagicCIFS filesystem id for CIFS + FsMagicCIFS = FsMagic(0xFF534D42) + // FsMagicFHGFS filesystem id for FHGFS + FsMagicFHGFSFs = FsMagic(0x19830326) + // FsMagicIBRIX filesystem id for IBRIX + FsMagicIBRIX = FsMagic(0x013111A8) + // FsMagicKAFS filesystem id for KAFS + FsMagicKAFS = FsMagic(0x6B414653) + // FsMagicLUSTRE filesystem id for LUSTRE + FsMagicLUSTRE = FsMagic(0x0BD00BD0) + // FsMagicNCP filesystem id for NCP + FsMagicNCP = FsMagic(0x564C) + // FsMagicNFSD filesystem id for NFSD + FsMagicNFSD = FsMagic(0x6E667364) + // FsMagicOCFS2 filesystem id for OCFS2 + FsMagicOCFS2 = FsMagic(0x7461636F) + // FsMagicPANFS filesystem id for PANFS + FsMagicPANFS = FsMagic(0xAAD7AAEA) + // FsMagicPRLFS filesystem id for PRLFS + FsMagicPRLFS = FsMagic(0x7C7C6673) + // FsMagicSMB2 filesystem id for SMB2 + FsMagicSMB2 = FsMagic(0xFE534D42) + // FsMagicSNFS filesystem id for SNFS + FsMagicSNFS = FsMagic(0xBEEFDEAD) + // FsMagicVBOXSF filesystem id for VBOXSF + FsMagicVBOXSF = FsMagic(0x786F4256) + // FsMagicVXFS filesystem id for VXFS + FsMagicVXFS = FsMagic(0xA501FCF5) ) var ( @@ -92,9 +128,14 @@ var ( // GetFSMagic returns the filesystem id given the path. 
func GetFSMagic(rootpath string) (FsMagic, error) { var buf unix.Statfs_t - if err := unix.Statfs(filepath.Dir(rootpath), &buf); err != nil { + path := filepath.Dir(rootpath) + if err := unix.Statfs(path, &buf); err != nil { return 0, err } + + if _, ok := FsNames[FsMagic(buf.Type)]; !ok { + logrus.Debugf("Unknown filesystem type %#x reported for %s", buf.Type, path) + } return FsMagic(buf.Type), nil } @@ -128,11 +169,32 @@ func (c *defaultChecker) IsMounted(path string) bool { return m } +// isMountPoint checks that the given path is a mount point +func isMountPoint(mountPath string) (bool, error) { + // it is already the root + if mountPath == "/" { + return true, nil + } + + var s1, s2 unix.Stat_t + if err := unix.Stat(mountPath, &s1); err != nil { + return true, err + } + if err := unix.Stat(filepath.Dir(mountPath), &s2); err != nil { + return true, err + } + return s1.Dev != s2.Dev, nil +} + // Mounted checks if the given path is mounted as the fs type func Mounted(fsType FsMagic, mountPath string) (bool, error) { var buf unix.Statfs_t + if err := unix.Statfs(mountPath, &buf); err != nil { return false, err } - return FsMagic(buf.Type) == fsType, nil + if FsMagic(buf.Type) != fsType { + return false, nil + } + return isMountPoint(mountPath) } diff --git a/vendor/github.com/containers/storage/drivers/driver_solaris.go b/vendor/github.com/containers/storage/drivers/driver_solaris.go index 174fa9670..e32930916 100644 --- a/vendor/github.com/containers/storage/drivers/driver_solaris.go +++ b/vendor/github.com/containers/storage/drivers/driver_solaris.go @@ -1,3 +1,4 @@ +//go:build solaris && cgo // +build solaris,cgo package graphdriver diff --git a/vendor/github.com/containers/storage/drivers/driver_unsupported.go b/vendor/github.com/containers/storage/drivers/driver_unsupported.go index 4a875608b..8119d9a6c 100644 --- a/vendor/github.com/containers/storage/drivers/driver_unsupported.go +++ b/vendor/github.com/containers/storage/drivers/driver_unsupported.go @@ -1,4 +1,5 @@ -// +build !linux,!windows,!freebsd,!solaris +//go:build !linux && !windows && !freebsd && !solaris && !darwin +// +build !linux,!windows,!freebsd,!solaris,!darwin package graphdriver diff --git a/vendor/github.com/containers/storage/drivers/fsdiff.go b/vendor/github.com/containers/storage/drivers/fsdiff.go index 93743d177..6b2496ec5 100644 --- a/vendor/github.com/containers/storage/drivers/fsdiff.go +++ b/vendor/github.com/containers/storage/drivers/fsdiff.go @@ -2,13 +2,15 @@ package graphdriver import ( "io" + "os" + "runtime" "time" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/chrootarchive" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" - rsystem "github.com/opencontainers/runc/libcontainer/system" + "github.com/containers/storage/pkg/unshare" "github.com/sirupsen/logrus" ) @@ -31,10 +33,11 @@ type NaiveDiffDriver struct { // NewNaiveDiffDriver returns a fully functional driver that wraps the // given ProtoDriver and adds the capability of the following methods which // it may or may not support on its own: -// Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error) -// Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) -// ApplyDiff(id, parent string, options ApplyDiffOpts) (size int64, err error) -// DiffSize(id string, idMappings *idtools.IDMappings, parent, 
parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) +// +// Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error) +// Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) +// ApplyDiff(id, parent string, options ApplyDiffOpts) (size int64, err error) +// DiffSize(id string, idMappings *idtools.IDMappings, parent, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) func NewNaiveDiffDriver(driver ProtoDriver, updater LayerIDMapUpdater) Driver { return &NaiveDiffDriver{ProtoDriver: driver, LayerIDMapUpdater: updater} } @@ -62,7 +65,7 @@ func (gdw *NaiveDiffDriver) Diff(id string, idMappings *idtools.IDMappings, pare defer func() { if err != nil { - driver.Put(id) + driverPut(driver, id, &err) } }() @@ -77,7 +80,7 @@ func (gdw *NaiveDiffDriver) Diff(id string, idMappings *idtools.IDMappings, pare } return ioutils.NewReadCloserWrapper(archive, func() error { err := archive.Close() - driver.Put(id) + driverPut(driver, id, &err) return err }), nil } @@ -87,7 +90,7 @@ func (gdw *NaiveDiffDriver) Diff(id string, idMappings *idtools.IDMappings, pare if err != nil { return nil, err } - defer driver.Put(parent) + defer driverPut(driver, parent, &err) changes, err := archive.ChangesDirs(layerFs, idMappings, parentFs, parentMappings) if err != nil { @@ -101,20 +104,20 @@ func (gdw *NaiveDiffDriver) Diff(id string, idMappings *idtools.IDMappings, pare return ioutils.NewReadCloserWrapper(archive, func() error { err := archive.Close() - driver.Put(id) + driverPut(driver, id, &err) // NaiveDiffDriver compares file metadata with parent layers. Parent layers // are extracted from tar's with full second precision on modified time. // We need this hack here to make sure calls within same second receive // correct result. - time.Sleep(startTime.Truncate(time.Second).Add(time.Second).Sub(time.Now())) + time.Sleep(time.Until(startTime.Truncate(time.Second).Add(time.Second))) return err }), nil } // Changes produces a list of changes between the specified layer // and its parent layer. If parent is "", then all changes will be ADD changes. 
-func (gdw *NaiveDiffDriver) Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) { +func (gdw *NaiveDiffDriver) Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (_ []archive.Change, retErr error) { driver := gdw.ProtoDriver if idMappings == nil { @@ -131,19 +134,20 @@ func (gdw *NaiveDiffDriver) Changes(id string, idMappings *idtools.IDMappings, p if err != nil { return nil, err } - defer driver.Put(id) + defer driverPut(driver, id, &retErr) parentFs := "" if parent != "" { options := MountOpts{ MountLabel: mountLabel, + Options: []string{"ro"}, } parentFs, err = driver.Get(parent, options) if err != nil { return nil, err } - defer driver.Put(parent) + defer driverPut(driver, parent, &retErr) } return archive.ChangesDirs(layerFs, idMappings, parentFs, parentMappings) @@ -167,11 +171,18 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, options ApplyDiffOpts) if err != nil { return } - defer driver.Put(id) + defer driverPut(driver, id, &err) + + defaultForceMask := os.FileMode(0700) + var forceMask *os.FileMode // = nil + if runtime.GOOS == "darwin" { + forceMask = &defaultForceMask + } tarOptions := &archive.TarOptions{ - InUserNS: rsystem.RunningInUserNS(), + InUserNS: unshare.IsRootless(), IgnoreChownErrors: options.IgnoreChownErrors, + ForceMask: forceMask, } if options.Mappings != nil { tarOptions.UIDMaps = options.Mappings.UIDs() @@ -180,7 +191,7 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, options ApplyDiffOpts) start := time.Now().UTC() logrus.Debug("Start untar layer") if size, err = ApplyUncompressedLayer(layerFs, options.Diff, tarOptions); err != nil { - logrus.Errorf("Error while applying layer: %s", err) + logrus.Errorf("While applying layer: %s", err) return } logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) @@ -213,7 +224,7 @@ func (gdw *NaiveDiffDriver) DiffSize(id string, idMappings *idtools.IDMappings, if err != nil { return } - defer driver.Put(id) + defer driverPut(driver, id, &err) return archive.ChangesSize(layerFs, changes), nil } diff --git a/vendor/github.com/containers/storage/drivers/overlay/check.go b/vendor/github.com/containers/storage/drivers/overlay/check.go index 43fe00625..0a0ad7dd5 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/check.go +++ b/vendor/github.com/containers/storage/drivers/overlay/check.go @@ -1,21 +1,22 @@ +//go:build linux // +build linux package overlay import ( + "errors" "fmt" - "io/ioutil" "os" "path" "path/filepath" "syscall" "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/system" "github.com/containers/storage/pkg/unshare" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -25,7 +26,7 @@ import ( // directory or the kernel enable CONFIG_OVERLAY_FS_REDIRECT_DIR. // When these exist naive diff should be used. 
func doesSupportNativeDiff(d, mountOpts string) error { - td, err := ioutil.TempDir(d, "opaque-bug-check") + td, err := os.MkdirTemp(d, "opaque-bug-check") if err != nil { return err } @@ -57,16 +58,21 @@ func doesSupportNativeDiff(d, mountOpts string) error { // Mark l2/d as opaque if err := system.Lsetxattr(filepath.Join(td, "l2", "d"), archive.GetOverlayXattrName("opaque"), []byte("y"), 0); err != nil { - return errors.Wrap(err, "failed to set opaque flag on middle layer") + return fmt.Errorf("failed to set opaque flag on middle layer: %w", err) } - opts := fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", path.Join(td, "l2"), path.Join(td, "l1"), path.Join(td, "l3"), path.Join(td, "work")) + mountFlags := "lowerdir=%s:%s,upperdir=%s,workdir=%s" + if unshare.IsRootless() { + mountFlags = mountFlags + ",userxattr" + } + + opts := fmt.Sprintf(mountFlags, path.Join(td, "l2"), path.Join(td, "l1"), path.Join(td, "l3"), path.Join(td, "work")) flags, data := mount.ParseOptions(mountOpts) if data != "" { opts = fmt.Sprintf("%s,%s", opts, data) } if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", uintptr(flags), opts); err != nil { - return errors.Wrap(err, "failed to mount overlay") + return fmt.Errorf("failed to mount overlay: %w", err) } defer func() { if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil { @@ -75,14 +81,14 @@ func doesSupportNativeDiff(d, mountOpts string) error { }() // Touch file in d to force copy up of opaque directory "d" from "l2" to "l3" - if err := ioutil.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0644); err != nil { - return errors.Wrap(err, "failed to write to merged directory") + if err := os.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0644); err != nil { + return fmt.Errorf("failed to write to merged directory: %w", err) } // Check l3/d does not have opaque flag xattrOpaque, err := system.Lgetxattr(filepath.Join(td, "l3", "d"), archive.GetOverlayXattrName("opaque")) if err != nil { - return errors.Wrap(err, "failed to read opaque flag on upper layer") + return fmt.Errorf("failed to read opaque flag on upper layer: %w", err) } if string(xattrOpaque) == "y" { return errors.New("opaque flag erroneously copied up, consider update to kernel 4.8 or later to fix") @@ -94,12 +100,12 @@ func doesSupportNativeDiff(d, mountOpts string) error { if err.(*os.LinkError).Err == syscall.EXDEV { return nil } - return errors.Wrap(err, "failed to rename dir in merged directory") + return fmt.Errorf("failed to rename dir in merged directory: %w", err) } // get the xattr of "d2" xattrRedirect, err := system.Lgetxattr(filepath.Join(td, "l3", "d2"), archive.GetOverlayXattrName("redirect")) if err != nil { - return errors.Wrap(err, "failed to read redirect flag on upper layer") + return fmt.Errorf("failed to read redirect flag on upper layer: %w", err) } if string(xattrRedirect) == "d1" { @@ -114,7 +120,7 @@ func doesSupportNativeDiff(d, mountOpts string) error { // copying up a file from a lower layer unless/until its contents are being // modified func doesMetacopy(d, mountOpts string) (bool, error) { - td, err := ioutil.TempDir(d, "metacopy-check") + td, err := os.MkdirTemp(d, "metacopy-check") if err != nil { return false, err } @@ -150,11 +156,11 @@ func doesMetacopy(d, mountOpts string) (bool, error) { opts = fmt.Sprintf("%s,%s", opts, data) } if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", uintptr(flags), opts); err != nil { - if errors.Cause(err) == unix.EINVAL { - logrus.Info("metacopy 
option not supported on this kernel", mountOpts) + if errors.Is(err, unix.EINVAL) { + logrus.Infof("overlay: metacopy option not supported on this kernel, checked using options %q", mountOpts) return false, nil } - return false, errors.Wrapf(err, "failed to mount overlay for metacopy check with %q options", mountOpts) + return false, fmt.Errorf("failed to mount overlay for metacopy check with %q options: %w", mountOpts, err) } defer func() { if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil { @@ -164,7 +170,7 @@ func doesMetacopy(d, mountOpts string) (bool, error) { // Make a change that only impacts the inode, and check if the pulled-up copy is marked // as a metadata-only copy if err := os.Chmod(filepath.Join(td, "merged", "f"), 0600); err != nil { - return false, errors.Wrap(err, "error changing permissions on file for metacopy check") + return false, fmt.Errorf("changing permissions on file for metacopy check: %w", err) } metacopy, err := system.Lgetxattr(filepath.Join(td, "l2", "f"), archive.GetOverlayXattrName("metacopy")) if err != nil { @@ -172,14 +178,14 @@ func doesMetacopy(d, mountOpts string) (bool, error) { logrus.Info("metacopy option not supported") return false, nil } - return false, errors.Wrap(err, "metacopy flag was not set on file in upper layer") + return false, fmt.Errorf("metacopy flag was not set on file in upper layer: %w", err) } return metacopy != nil, nil } // doesVolatile checks if the filesystem supports the "volatile" mount option func doesVolatile(d string) (bool, error) { - td, err := ioutil.TempDir(d, "volatile-check") + td, err := os.MkdirTemp(d, "volatile-check") if err != nil { return false, err } @@ -204,7 +210,7 @@ func doesVolatile(d string) (bool, error) { // Mount using the mandatory options and configured options opts := fmt.Sprintf("volatile,lowerdir=%s,upperdir=%s,workdir=%s", path.Join(td, "lower"), path.Join(td, "upper"), path.Join(td, "work")) if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", 0, opts); err != nil { - return false, errors.Wrapf(err, "failed to mount overlay for volatile check") + return false, fmt.Errorf("failed to mount overlay for volatile check: %w", err) } defer func() { if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil { @@ -213,3 +219,55 @@ func doesVolatile(d string) (bool, error) { }() return true, nil } + +// supportsIdmappedLowerLayers checks if the kernel supports mounting overlay on top of +// a idmapped lower layer. 
+func supportsIdmappedLowerLayers(home string) (bool, error) { + layerDir, err := os.MkdirTemp(home, "compat") + if err != nil { + return false, err + } + defer func() { + _ = os.RemoveAll(layerDir) + }() + + mergedDir := filepath.Join(layerDir, "merged") + lowerDir := filepath.Join(layerDir, "lower") + lowerMappedDir := filepath.Join(layerDir, "lower-mapped") + upperDir := filepath.Join(layerDir, "upper") + workDir := filepath.Join(layerDir, "work") + + _ = idtools.MkdirAs(mergedDir, 0700, 0, 0) + _ = idtools.MkdirAs(lowerDir, 0700, 0, 0) + _ = idtools.MkdirAs(lowerMappedDir, 0700, 0, 0) + _ = idtools.MkdirAs(upperDir, 0700, 0, 0) + _ = idtools.MkdirAs(workDir, 0700, 0, 0) + + idmap := []idtools.IDMap{ + { + ContainerID: 0, + HostID: 0, + Size: 1, + }, + } + pid, cleanupFunc, err := createUsernsProcess(idmap, idmap) + if err != nil { + return false, err + } + defer cleanupFunc() + + if err := createIDMappedMount(lowerDir, lowerMappedDir, int(pid)); err != nil { + return false, fmt.Errorf("create mapped mount: %w", err) + } + defer unix.Unmount(lowerMappedDir, unix.MNT_DETACH) + + opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerMappedDir, upperDir, workDir) + flags := uintptr(0) + if err := unix.Mount("overlay", mergedDir, "overlay", flags, opts); err != nil { + return false, err + } + defer func() { + _ = unix.Unmount(mergedDir, unix.MNT_DETACH) + }() + return true, nil +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/check_116.go b/vendor/github.com/containers/storage/drivers/overlay/check_116.go new file mode 100644 index 000000000..bec455dd4 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/check_116.go @@ -0,0 +1,45 @@ +//go:build linux +// +build linux + +package overlay + +import ( + "errors" + "io/fs" + "path/filepath" + "strings" + + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/system" + "golang.org/x/sys/unix" +) + +func scanForMountProgramIndicators(home string) (detected bool, err error) { + err = filepath.WalkDir(home, func(path string, d fs.DirEntry, err error) error { + if detected { + return fs.SkipDir + } + if err != nil { + return err + } + basename := filepath.Base(path) + if strings.HasPrefix(basename, archive.WhiteoutPrefix) { + detected = true + return fs.SkipDir + } + if d.IsDir() { + xattrs, err := system.Llistxattr(path) + if err != nil && !errors.Is(err, unix.EOPNOTSUPP) { + return err + } + for _, xattr := range xattrs { + if strings.HasPrefix(xattr, "user.fuseoverlayfs.") || strings.HasPrefix(xattr, "user.containers.") { + detected = true + return fs.SkipDir + } + } + } + return nil + }) + return detected, err +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go b/vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go new file mode 100644 index 000000000..0b7c868ac --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go @@ -0,0 +1,155 @@ +//go:build linux +// +build linux + +package overlay + +import ( + "fmt" + "os" + "runtime" + "syscall" + "unsafe" + + "github.com/containers/storage/pkg/idtools" + "golang.org/x/sys/unix" +) + +type attr struct { + attrSet uint64 + attrClr uint64 + propagation uint64 + userNs uint64 +} + +// openTree is a wrapper for the open_tree syscall +func openTree(path string, flags int) (fd int, err error) { + var _p0 *byte + + if _p0, err = syscall.BytePtrFromString(path); err != nil { + return 0, err + } + + r, _, e1 := 
syscall.Syscall6(uintptr(unix.SYS_OPEN_TREE), uintptr(0), uintptr(unsafe.Pointer(_p0)),
+		uintptr(flags), 0, 0, 0)
+	if e1 != 0 {
+		err = e1
+	}
+	return int(r), nil
+}
+
+// moveMount is a wrapper for the move_mount syscall.
+func moveMount(fdTree int, target string) (err error) {
+	var _p0, _p1 *byte
+
+	empty := ""
+
+	if _p0, err = syscall.BytePtrFromString(target); err != nil {
+		return err
+	}
+	if _p1, err = syscall.BytePtrFromString(empty); err != nil {
+		return err
+	}
+
+	flags := unix.MOVE_MOUNT_F_EMPTY_PATH
+
+	_, _, e1 := syscall.Syscall6(uintptr(unix.SYS_MOVE_MOUNT),
+		uintptr(fdTree), uintptr(unsafe.Pointer(_p1)),
+		0, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
+// mountSetAttr is a wrapper for the mount_setattr syscall
+func mountSetAttr(dfd int, path string, flags uint, attr *attr, size uint) (err error) {
+	var _p0 *byte
+
+	if _p0, err = syscall.BytePtrFromString(path); err != nil {
+		return err
+	}
+
+	_, _, e1 := syscall.Syscall6(uintptr(unix.SYS_MOUNT_SETATTR), uintptr(dfd), uintptr(unsafe.Pointer(_p0)),
+		uintptr(flags), uintptr(unsafe.Pointer(attr)), uintptr(size), 0)
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
+// createIDMappedMount creates an IDMapped bind mount from SOURCE to TARGET using the user namespace
+// of the process PID.
+func createIDMappedMount(source, target string, pid int) error {
+	path := fmt.Sprintf("/proc/%d/ns/user", pid)
+	userNsFile, err := os.Open(path)
+	if err != nil {
+		return fmt.Errorf("unable to get user ns file descriptor for %q: %w", path, err)
+	}
+
+	var attr attr
+	attr.attrSet = unix.MOUNT_ATTR_IDMAP
+	attr.attrClr = 0
+	attr.propagation = 0
+	attr.userNs = uint64(userNsFile.Fd())
+
+	defer userNsFile.Close()
+
+	targetDirFd, err := openTree(source, unix.OPEN_TREE_CLONE)
+	if err != nil {
+		return err
+	}
+	defer unix.Close(targetDirFd)
+
+	if err := mountSetAttr(targetDirFd, "", unix.AT_EMPTY_PATH|unix.AT_RECURSIVE,
+		&attr, uint(unsafe.Sizeof(attr))); err != nil {
+		return err
+	}
+	if err := os.Mkdir(target, 0700); err != nil && !os.IsExist(err) {
+		return err
+	}
+	return moveMount(targetDirFd, target)
+}
+
+// createUsernsProcess forks the current process and creates a user namespace using the specified
+// mappings. It returns the pid of the new process.
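For comparison only, not part of this patch: recent golang.org/x/sys/unix releases export OpenTree, MountSetattr, and MoveMount wrappers, so the open_tree/mount_setattr/move_mount sequence above can be written without raw Syscall6 plumbing. A sketch, assuming a unix package new enough to provide those wrappers and a caller that already holds an open user-namespace descriptor:

package overlaynotes

import "golang.org/x/sys/unix"

// createIDMappedMountViaUnix mirrors the sequence used by createIDMappedMount.
// userNsFd is an open descriptor for the target user namespace, e.g. one
// obtained from /proc/<pid>/ns/user.
func createIDMappedMountViaUnix(source, target string, userNsFd int) error {
	// Detach a private copy of the source subtree.
	fd, err := unix.OpenTree(unix.AT_FDCWD, source, unix.OPEN_TREE_CLONE)
	if err != nil {
		return err
	}
	defer unix.Close(fd)

	// Attach the user namespace as the mount's ID mapping.
	attr := unix.MountAttr{
		Attr_set:  unix.MOUNT_ATTR_IDMAP,
		Userns_fd: uint64(userNsFd),
	}
	if err := unix.MountSetattr(fd, "", unix.AT_EMPTY_PATH|unix.AT_RECURSIVE, &attr); err != nil {
		return err
	}

	// Splice the configured tree into the filesystem at target.
	return unix.MoveMount(fd, "", unix.AT_FDCWD, target, unix.MOVE_MOUNT_F_EMPTY_PATH)
}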
+func createUsernsProcess(uidMaps []idtools.IDMap, gidMaps []idtools.IDMap) (int, func(), error) { + var pid uintptr + var err syscall.Errno + + if runtime.GOARCH == "s390x" { + pid, _, err = syscall.Syscall6(uintptr(unix.SYS_CLONE), 0, unix.CLONE_NEWUSER|uintptr(unix.SIGCHLD), 0, 0, 0, 0) + } else { + pid, _, err = syscall.Syscall6(uintptr(unix.SYS_CLONE), unix.CLONE_NEWUSER|uintptr(unix.SIGCHLD), 0, 0, 0, 0, 0) + } + if err != 0 { + return -1, nil, err + } + if pid == 0 { + _ = unix.Prctl(unix.PR_SET_PDEATHSIG, uintptr(unix.SIGKILL), 0, 0, 0) + // just wait for the SIGKILL + for { + syscall.Pause() + } + } + cleanupFunc := func() { + unix.Kill(int(pid), unix.SIGKILL) + _, _ = unix.Wait4(int(pid), nil, 0, nil) + } + writeMappings := func(fname string, idmap []idtools.IDMap) error { + mappings := "" + for _, m := range idmap { + mappings = mappings + fmt.Sprintf("%d %d %d\n", m.ContainerID, m.HostID, m.Size) + } + return os.WriteFile(fmt.Sprintf("/proc/%d/%s", pid, fname), []byte(mappings), 0600) + } + if err := writeMappings("uid_map", uidMaps); err != nil { + cleanupFunc() + return -1, nil, err + } + if err := writeMappings("gid_map", gidMaps); err != nil { + cleanupFunc() + return -1, nil, err + } + + return int(pid), cleanupFunc, nil +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/jsoniter.go b/vendor/github.com/containers/storage/drivers/overlay/jsoniter.go index 2a1e9d0cc..bedda3507 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/jsoniter.go +++ b/vendor/github.com/containers/storage/drivers/overlay/jsoniter.go @@ -1,3 +1,6 @@ +//go:build linux +// +build linux + package overlay import jsoniter "github.com/json-iterator/go" diff --git a/vendor/github.com/containers/storage/drivers/overlay/mount.go b/vendor/github.com/containers/storage/drivers/overlay/mount.go index 7c8fd50a3..de47951d4 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/mount.go +++ b/vendor/github.com/containers/storage/drivers/overlay/mount.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package overlay @@ -7,14 +8,16 @@ import ( "flag" "fmt" "os" + "path/filepath" "runtime" + "strings" "github.com/containers/storage/pkg/reexec" "golang.org/x/sys/unix" ) func init() { - reexec.Register("storage-mountfrom", mountFromMain) + reexec.Register("storage-mountfrom", mountOverlayFromMain) } func fatal(err error) { @@ -30,7 +33,7 @@ type mountOptions struct { Flag uint32 } -func mountFrom(dir, device, target, mType string, flags uintptr, label string) error { +func mountOverlayFrom(dir, device, target, mType string, flags uintptr, label string) error { options := &mountOptions{ Device: device, Target: target, @@ -42,7 +45,7 @@ func mountFrom(dir, device, target, mType string, flags uintptr, label string) e cmd := reexec.Command("storage-mountfrom", dir) w, err := cmd.StdinPipe() if err != nil { - return fmt.Errorf("mountfrom error on pipe creation: %v", err) + return fmt.Errorf("mountfrom error on pipe creation: %w", err) } output := bytes.NewBuffer(nil) @@ -50,23 +53,23 @@ func mountFrom(dir, device, target, mType string, flags uintptr, label string) e cmd.Stderr = output if err := cmd.Start(); err != nil { w.Close() - return fmt.Errorf("mountfrom error on re-exec cmd: %v", err) + return fmt.Errorf("mountfrom error on re-exec cmd: %w", err) } //write the options to the pipe for the untar exec to read if err := json.NewEncoder(w).Encode(options); err != nil { w.Close() - return fmt.Errorf("mountfrom json encode to pipe failed: %v", err) + return 
fmt.Errorf("mountfrom json encode to pipe failed: %w", err) } w.Close() if err := cmd.Wait(); err != nil { - return fmt.Errorf("mountfrom re-exec error: %v: output: %v", err, output) + return fmt.Errorf("mountfrom re-exec output: %s: error: %w", output, err) } return nil } // mountfromMain is the entry-point for storage-mountfrom on re-exec. -func mountFromMain() { +func mountOverlayFromMain() { runtime.LockOSThread() flag.Parse() @@ -76,11 +79,96 @@ func mountFromMain() { fatal(err) } - if err := os.Chdir(flag.Arg(0)); err != nil { + // Mount the arguments passed from the specified directory. Some of the + // paths mentioned in the values we pass to the kernel are relative to + // the specified directory. + homedir := flag.Arg(0) + if err := os.Chdir(homedir); err != nil { fatal(err) } - if err := unix.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label); err != nil { + pageSize := unix.Getpagesize() + if len(options.Label) < pageSize { + if err := unix.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label); err != nil { + fatal(err) + } + os.Exit(0) + } + + // Those arguments still took up too much space. Open the diff + // directories and use their descriptor numbers as lowers, using + // /proc/self/fd as the current directory. + + // Split out the various options, since we need to manipulate the + // paths, but we don't want to mess with other options. + var upperk, upperv, workk, workv, lowerk, lowerv, labelk, labelv, others string + for _, arg := range strings.Split(options.Label, ",") { + kv := strings.SplitN(arg, "=", 2) + switch kv[0] { + case "upperdir": + upperk = "upperdir=" + upperv = kv[1] + case "workdir": + workk = "workdir=" + workv = kv[1] + case "lowerdir": + lowerk = "lowerdir=" + lowerv = kv[1] + case "label": + labelk = "label=" + labelv = kv[1] + default: + if others == "" { + others = arg + } else { + others = others + "," + arg + } + } + } + + // Make sure upperdir, workdir, and the target are absolute paths. + if upperv != "" && !filepath.IsAbs(upperv) { + upperv = filepath.Join(homedir, upperv) + } + if workv != "" && !filepath.IsAbs(workv) { + workv = filepath.Join(homedir, workv) + } + if !filepath.IsAbs(options.Target) { + options.Target = filepath.Join(homedir, options.Target) + } + + // Get a descriptor for each lower, and use that descriptor's name as + // the new value for the list of lowers, because it's shorter. + if lowerv != "" { + lowers := strings.Split(lowerv, ":") + for i := range lowers { + lowerFd, err := unix.Open(lowers[i], unix.O_RDONLY, 0) + if err != nil { + fatal(err) + } + lowers[i] = fmt.Sprintf("%d", lowerFd) + } + lowerv = strings.Join(lowers, ":") + } + + // Reconstruct the Label field. + options.Label = upperk + upperv + "," + workk + workv + "," + lowerk + lowerv + "," + labelk + labelv + "," + others + options.Label = strings.ReplaceAll(options.Label, ",,", ",") + + // Okay, try this, if we managed to make the arguments fit. + var err error + if len(options.Label) < pageSize { + if err := os.Chdir("/proc/self/fd"); err != nil { + fatal(err) + } + err = unix.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label) + } else { + err = fmt.Errorf("cannot mount layer, mount data %q too large %d >= page size %d", options.Label, len(options.Label), pageSize) + } + + // Clean up. 
+ if err != nil { + fmt.Fprintf(os.Stderr, "creating overlay mount to %s, mount_data=%q\n", options.Target, options.Label) fatal(err) } diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go index d5d161bfd..e33bf16db 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package overlay @@ -5,9 +6,9 @@ package overlay import ( "bytes" "encoding/base64" + "errors" "fmt" "io" - "io/ioutil" "os" "os/exec" "path" @@ -16,6 +17,7 @@ import ( "strings" "sync" "syscall" + "unicode" graphdriver "github.com/containers/storage/drivers" "github.com/containers/storage/drivers/overlayutils" @@ -25,7 +27,6 @@ import ( "github.com/containers/storage/pkg/directory" "github.com/containers/storage/pkg/fsutils" "github.com/containers/storage/pkg/idtools" - "github.com/containers/storage/pkg/locker" "github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/parsers" "github.com/containers/storage/pkg/system" @@ -33,12 +34,9 @@ import ( units "github.com/docker/go-units" "github.com/hashicorp/go-multierror" digest "github.com/opencontainers/go-digest" - rsystem "github.com/opencontainers/runc/libcontainer/system" "github.com/opencontainers/selinux/go-selinux" "github.com/opencontainers/selinux/go-selinux/label" - "github.com/pkg/errors" "github.com/sirupsen/logrus" - "github.com/vbatts/tar-split/tar/storage" "golang.org/x/sys/unix" ) @@ -76,21 +74,23 @@ const ( // or root directory. Mounts are always done relative to root and // referencing the symbolic links in order to ensure the number of // lower directories can fit in a single page for making the mount -// syscall. A hard upper limit of 128 lower layers is enforced to ensure +// syscall. A hard upper limit of 500 lower layers is enforced to ensure // that mounts do not fail due to length. const ( linkDir = "l" lowerFile = "lower" - maxDepth = 128 + maxDepth = 500 // idLength represents the number of random characters // which can be used to create the unique link identifier // for every layer. If this value is too long then the // page size limit for the mount command may be exceeded. // The idLength should be selected such that following equation - // is true (512 is a buffer for label metadata). - // ((idLength + len(linkDir) + 1) * maxDepth) <= (pageSize - 512) + // is true (512 is a buffer for label metadata, 128 is the + // number of lowers we want to be able to use without having + // to use bind mounts to get all the way to the kernel limit). 
+	// ((idLength + len(linkDir) + 1) * 128) <= (pageSize - 512)
 	idLength = 26
 )
 
@@ -119,7 +119,8 @@ type Driver struct {
 	supportsDType    bool
 	supportsVolatile *bool
 	usingMetacopy    bool
-	locker           *locker.Locker
+
+	supportsIDMappedMounts *bool
 }
 
 type additionalLayerStore struct {
@@ -142,8 +143,8 @@ var (
 )
 
 func init() {
-	graphdriver.Register("overlay", Init)
-	graphdriver.Register("overlay2", Init)
+	graphdriver.MustRegister("overlay", Init)
+	graphdriver.MustRegister("overlay2", Init)
 }
 
 func hasMetacopyOption(opts []string) bool {
@@ -155,6 +156,15 @@ func hasMetacopyOption(opts []string) bool {
 	return false
 }
 
+func stripOption(opts []string, option string) []string {
+	for i, s := range opts {
+		if s == option {
+			return stripOption(append(opts[:i], opts[i+1:]...), option)
+		}
+	}
+	return opts
+}
+
 func hasVolatileOption(opts []string) bool {
 	for _, s := range opts {
 		if s == "volatile" {
@@ -174,27 +184,51 @@ func checkSupportVolatile(home, runhome string) (bool, error) {
 	var usingVolatile bool
 	if err == nil {
 		if volatileCacheResult {
-			logrus.Debugf("cached value indicated that volatile is being used")
+			logrus.Debugf("Cached value indicated that volatile is being used")
 		} else {
-			logrus.Debugf("cached value indicated that volatile is not being used")
+			logrus.Debugf("Cached value indicated that volatile is not being used")
 		}
 		usingVolatile = volatileCacheResult
 	} else {
 		usingVolatile, err = doesVolatile(home)
 		if err == nil {
 			if usingVolatile {
-				logrus.Debugf("overlay test mount indicated that volatile is being used")
+				logrus.Debugf("overlay: test mount indicated that volatile is being used")
 			} else {
-				logrus.Debugf("overlay test mount indicated that volatile is not being used")
+				logrus.Debugf("overlay: test mount indicated that volatile is not being used")
 			}
 			if err = cachedFeatureRecord(runhome, feature, usingVolatile, ""); err != nil {
-				return false, errors.Wrap(err, "error recording volatile-being-used status")
+				return false, fmt.Errorf("recording volatile-being-used status: %w", err)
			}
 		}
 	}
 	return usingVolatile, nil
 }
 
+// checkAndRecordIDMappedSupport checks and stores if the kernel supports mounting overlay on top of an
+// idmapped lower layer.
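stripOption removes every occurrence of an option, not just the first, by recursing after each deletion. A short runnable demo, with overlaynotes and Example as illustrative names; note that append(opts[:i], opts[i+1:]...) compacts the caller's backing array in place, so the input slice should not be reused after the call:

package overlaynotes

import "fmt"

// stripOption, restated from the patch so the demo is self-contained.
func stripOption(opts []string, option string) []string {
	for i, s := range opts {
		if s == option {
			return stripOption(append(opts[:i], opts[i+1:]...), option)
		}
	}
	return opts
}

func Example() {
	opts := []string{"rw", "metacopy=on", "volatile", "metacopy=on"}
	fmt.Println(stripOption(opts, "metacopy=on")) // prints: [rw volatile]
}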
+func checkAndRecordIDMappedSupport(home, runhome string) (bool, error) { + if os.Geteuid() != 0 { + return false, nil + } + + feature := "idmapped-lower-dir" + overlayCacheResult, overlayCacheText, err := cachedFeatureCheck(runhome, feature) + if err == nil { + if overlayCacheResult { + logrus.Debugf("Cached value indicated that idmapped mounts for overlay are supported") + return true, nil + } + logrus.Debugf("Cached value indicated that idmapped mounts for overlay are not supported") + return false, errors.New(overlayCacheText) + } + supportsIDMappedMounts, err := supportsIdmappedLowerLayers(home) + if err2 := cachedFeatureRecord(runhome, feature, supportsIDMappedMounts, ""); err2 != nil { + return false, fmt.Errorf("recording overlay idmapped mounts support status: %w", err2) + } + return supportsIDMappedMounts, err +} + func checkAndRecordOverlaySupport(fsMagic graphdriver.FsMagic, home, runhome string) (bool, error) { var supportsDType bool @@ -206,9 +240,9 @@ func checkAndRecordOverlaySupport(fsMagic graphdriver.FsMagic, home, runhome str overlayCacheResult, overlayCacheText, err := cachedFeatureCheck(runhome, feature) if err == nil { if overlayCacheResult { - logrus.Debugf("cached value indicated that overlay is supported") + logrus.Debugf("Cached value indicated that overlay is supported") } else { - logrus.Debugf("cached value indicated that overlay is not supported") + logrus.Debugf("Cached value indicated that overlay is not supported") } supportsDType = overlayCacheResult if !supportsDType { @@ -223,14 +257,14 @@ func checkAndRecordOverlaySupport(fsMagic graphdriver.FsMagic, home, runhome str if ok && patherr.Err == syscall.ENOSPC { return false, err } - err = errors.Wrap(err, "kernel does not support overlay fs") + err = fmt.Errorf("kernel does not support overlay fs: %w", err) if err2 := cachedFeatureRecord(runhome, feature, false, err.Error()); err2 != nil { - return false, errors.Wrapf(err2, "error recording overlay not being supported (%v)", err) + return false, fmt.Errorf("recording overlay not being supported (%v): %w", err, err2) } return false, err } if err = cachedFeatureRecord(runhome, feature, supportsDType, ""); err != nil { - return false, errors.Wrap(err, "error recording overlay support status") + return false, fmt.Errorf("recording overlay support status: %w", err) } } return supportsDType, nil @@ -248,6 +282,23 @@ func (d *Driver) getSupportsVolatile() (bool, error) { return supportsVolatile, nil } +// isNetworkFileSystem checks if the specified file system is supported by native overlay +// as backing store when running in a user namespace. +func isNetworkFileSystem(fsMagic graphdriver.FsMagic) bool { + switch fsMagic { + // a bunch of network file systems... + case graphdriver.FsMagicNfsFs, graphdriver.FsMagicSmbFs, graphdriver.FsMagicAcfs, + graphdriver.FsMagicAfs, graphdriver.FsMagicCephFs, graphdriver.FsMagicCIFS, + graphdriver.FsMagicFHGFSFs, graphdriver.FsMagicGPFS, graphdriver.FsMagicIBRIX, + graphdriver.FsMagicKAFS, graphdriver.FsMagicLUSTRE, graphdriver.FsMagicNCP, + graphdriver.FsMagicNFSD, graphdriver.FsMagicOCFS2, graphdriver.FsMagicPANFS, + graphdriver.FsMagicPRLFS, graphdriver.FsMagicSMB2, graphdriver.FsMagicSNFS, + graphdriver.FsMagicVBOXSF, graphdriver.FsMagicVXFS: + return true + } + return false +} + // Init returns the a native diff driver for overlay filesystem. // If overlay filesystem is not supported on the host, a wrapped graphdriver.ErrNotSupported is returned as error. 
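Init and isNetworkFileSystem both key off the f_type magic that statfs(2) reports for the backing directory. A sketch of that detection, not part of this patch; the magic constants come from the kernel's <linux/magic.h>, and only two of the many filesystems Init distinguishes are shown:

package overlaynotes

import "golang.org/x/sys/unix"

const (
	fsMagicOverlay = 0x794c7630 // overlayfs: a mount_program is required on top of itself
	fsMagicNfs     = 0x6969     // NFS: rejected for rootless native overlay
)

// backingFsMagic reports the f_type of the filesystem holding path,
// the same datum graphdriver.GetFSMagic supplies to Init.
func backingFsMagic(path string) (int64, error) {
	var st unix.Statfs_t
	if err := unix.Statfs(path, &st); err != nil {
		return 0, err
	}
	return int64(st.Type), nil
}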
// If an overlay filesystem is not supported over an existing filesystem then a wrapped graphdriver.ErrIncompatibleFS is returned. @@ -261,40 +312,61 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) if err != nil { return nil, err } - if fsName, ok := graphdriver.FsNames[fsMagic]; ok { - backingFs = fsName - } - - if opts.mountProgram != "" { - f, err := os.Create(getMountProgramFlagFile(home)) - if err == nil { - f.Close() - } - } else { - // check if they are running over btrfs, aufs, zfs, overlay, or ecryptfs - if opts.forceMask != nil { - return nil, errors.New("'force_mask' is supported only with 'mount_program'") - } - switch fsMagic { - case graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs: - return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s, a mount_program is required", backingFs) - } + fsName, ok := graphdriver.FsNames[fsMagic] + if !ok { + return nil, fmt.Errorf("filesystem type %#x reported for %s is not supported with 'overlay': %w", fsMagic, filepath.Dir(home), graphdriver.ErrIncompatibleFS) } + backingFs = fsName + runhome := filepath.Join(options.RunRoot, filepath.Base(home)) rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) if err != nil { return nil, err } // Create the driver home dir - if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0755, 0, 0); err != nil { return nil, err } - runhome := filepath.Join(options.RunRoot, filepath.Base(home)) + if err := idtools.MkdirAllAs(runhome, 0700, rootUID, rootGID); err != nil { return nil, err } + if opts.mountProgram == "" { + if supported, err := SupportsNativeOverlay(home, runhome); err != nil { + return nil, err + } else if !supported { + if path, err := exec.LookPath("fuse-overlayfs"); err == nil { + opts.mountProgram = path + } + } + } + + if opts.mountProgram != "" { + if unshare.IsRootless() && isNetworkFileSystem(fsMagic) && opts.forceMask == nil { + m := os.FileMode(0700) + opts.forceMask = &m + logrus.Warnf("Network file system detected as backing store. Enforcing overlay option `force_mask=\"%o\"`. Add it to storage.conf to silence this warning", m) + } + + if err := os.WriteFile(getMountProgramFlagFile(home), []byte("true"), 0600); err != nil { + return nil, err + } + } else { + if opts.forceMask != nil { + return nil, errors.New("'force_mask' is supported only with 'mount_program'") + } + // check if they are running over btrfs, aufs, overlay, or ecryptfs + switch fsMagic { + case graphdriver.FsMagicAufs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs: + return nil, fmt.Errorf("'overlay' is not supported over %s, a mount_program is required: %w", backingFs, graphdriver.ErrIncompatibleFS) + } + if unshare.IsRootless() && isNetworkFileSystem(fsMagic) { + return nil, fmt.Errorf("a network file system with user namespaces is not supported. 
Please use a mount_program: %w", graphdriver.ErrIncompatibleFS) + } + } + var usingMetacopy bool var supportsDType bool var supportsVolatile *bool @@ -311,24 +383,24 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) metacopyCacheResult, _, err := cachedFeatureCheck(runhome, feature) if err == nil { if metacopyCacheResult { - logrus.Debugf("cached value indicated that metacopy is being used") + logrus.Debugf("Cached value indicated that metacopy is being used") } else { - logrus.Debugf("cached value indicated that metacopy is not being used") + logrus.Debugf("Cached value indicated that metacopy is not being used") } usingMetacopy = metacopyCacheResult } else { usingMetacopy, err = doesMetacopy(home, opts.mountOptions) if err == nil { if usingMetacopy { - logrus.Debugf("overlay test mount indicated that metacopy is being used") + logrus.Debugf("overlay: test mount indicated that metacopy is being used") } else { - logrus.Debugf("overlay test mount indicated that metacopy is not being used") + logrus.Debugf("overlay: test mount indicated that metacopy is not being used") } if err = cachedFeatureRecord(runhome, feature, usingMetacopy, ""); err != nil { - return nil, errors.Wrap(err, "error recording metacopy-being-used status") + return nil, fmt.Errorf("recording metacopy-being-used status: %w", err) } } else { - logrus.Infof("overlay test mount did not indicate whether or not metacopy is being used: %v", err) + logrus.Infof("overlay: test mount did not indicate whether or not metacopy is being used: %v", err) return nil, err } } @@ -355,7 +427,6 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) supportsDType: supportsDType, usingMetacopy: usingMetacopy, supportsVolatile: supportsVolatile, - locker: locker.New(), options: *opts, } @@ -364,12 +435,12 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) // Try to enable project quota support over xfs. if d.quotaCtl, err = quota.NewControl(home); err == nil { projectQuotaSupported = true - } else if opts.quota.Size > 0 { - return nil, fmt.Errorf("Storage option overlay.size not supported. Filesystem does not support Project Quota: %v", err) + } else if opts.quota.Size > 0 || opts.quota.Inodes > 0 { + return nil, fmt.Errorf("storage options overlay.size and overlay.inodes not supported. Filesystem does not support Project Quota: %w", err) } - } else if opts.quota.Size > 0 { + } else if opts.quota.Size > 0 || opts.quota.Inodes > 0 { // if xfs is not the backing fs then error out if the storage-opt overlay.size is used. - return nil, fmt.Errorf("Storage option overlay.size only supported for backingFS XFS. Found %v", backingFs) + return nil, fmt.Errorf("storage option overlay.size and overlay.inodes only supported for backingFS XFS. 
Found %v", backingFs) } logrus.Debugf("backingFs=%s, projectQuotaSupported=%v, useNativeDiff=%v, usingMetacopy=%v", backingFs, projectQuotaSupported, !d.useNaiveDiff(), d.usingMetacopy) @@ -400,6 +471,13 @@ func parseOptions(options []string) (*overlayOptions, error) { return nil, err } o.quota.Size = uint64(size) + case "inodes": + logrus.Debugf("overlay: inodes=%s", val) + inodes, err := strconv.ParseUint(val, 10, 64) + if err != nil { + return nil, err + } + o.quota.Inodes = uint64(inodes) case "imagestore", "additionalimagestore": logrus.Debugf("overlay: imagestore=%s", val) // Additional read only image stores to use for lower paths @@ -413,7 +491,7 @@ func parseOptions(options []string) (*overlayOptions, error) { } st, err := os.Stat(store) if err != nil { - return nil, fmt.Errorf("overlay: can't stat imageStore dir %s: %v", store, err) + return nil, fmt.Errorf("overlay: can't stat imageStore dir %s: %w", store, err) } if !st.IsDir() { return nil, fmt.Errorf("overlay: image path %q must be a directory", store) @@ -434,7 +512,7 @@ func parseOptions(options []string) (*overlayOptions, error) { } st, err := os.Stat(lstore) if err != nil { - return nil, errors.Wrap(err, "overlay: can't stat additionallayerstore dir") + return nil, fmt.Errorf("overlay: can't stat additionallayerstore dir: %w", err) } if !st.IsDir() { return nil, fmt.Errorf("overlay: additionallayerstore path %q must be a directory", lstore) @@ -461,7 +539,7 @@ func parseOptions(options []string) (*overlayOptions, error) { if val != "" { _, err := os.Stat(val) if err != nil { - return nil, errors.Wrapf(err, "overlay: can't stat program %q", val) + return nil, fmt.Errorf("overlay: can't stat program %q: %w", val, err) } } o.mountProgram = val @@ -505,11 +583,11 @@ func cachedFeatureSet(feature string, set bool) string { } func cachedFeatureCheck(runhome, feature string) (supported bool, text string, err error) { - content, err := ioutil.ReadFile(filepath.Join(runhome, cachedFeatureSet(feature, true))) + content, err := os.ReadFile(filepath.Join(runhome, cachedFeatureSet(feature, true))) if err == nil { return true, string(content), nil } - content, err = ioutil.ReadFile(filepath.Join(runhome, cachedFeatureSet(feature, false))) + content, err = os.ReadFile(filepath.Join(runhome, cachedFeatureSet(feature, false))) if err == nil { return false, string(content), nil } @@ -527,17 +605,34 @@ func cachedFeatureRecord(runhome, feature string, supported bool, text string) ( return err } -func SupportsNativeOverlay(graphroot, rundir string) (bool, error) { - if os.Geteuid() != 0 || graphroot == "" || rundir == "" { +func SupportsNativeOverlay(home, runhome string) (bool, error) { + if os.Geteuid() != 0 || home == "" || runhome == "" { return false, nil } - home := filepath.Join(graphroot, "overlay") - runhome := filepath.Join(rundir, "overlay") - - if _, err := os.Stat(getMountProgramFlagFile(home)); err == nil { - logrus.Debugf("overlay storage already configured with a mount-program") + var contents string + flagContent, err := os.ReadFile(getMountProgramFlagFile(home)) + if err == nil { + contents = strings.TrimSpace(string(flagContent)) + } + switch contents { + case "true": + logrus.Debugf("overlay: storage already configured with a mount-program") return false, nil + default: + needsMountProgram, err := scanForMountProgramIndicators(home) + if err != nil && !os.IsNotExist(err) { + return false, err + } + if err := os.WriteFile(getMountProgramFlagFile(home), []byte(fmt.Sprintf("%t", needsMountProgram)), 0600); err != nil && 
!os.IsNotExist(err) { + return false, err + } + if needsMountProgram { + return false, nil + } + // fall through to check if we find ourselves needing to use a + // mount program now + case "false": } for _, dir := range []string{home, runhome} { @@ -565,7 +660,7 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI logLevel = logrus.DebugLevel } - layerDir, err := ioutil.TempDir(home, "compat") + layerDir, err := os.MkdirTemp(home, "compat") if err != nil { patherr, ok := err.(*os.PathError) if ok && patherr.Err == syscall.ENOSPC { @@ -613,14 +708,18 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI if unshare.IsRootless() { flags = fmt.Sprintf("%s,userxattr", flags) } + if err := syscall.Mknod(filepath.Join(upperDir, "whiteout"), syscall.S_IFCHR|0600, int(unix.Mkdev(0, 0))); err != nil { + logrus.Debugf("Unable to create kernel-style whiteout: %v", err) + return supportsDType, fmt.Errorf("unable to create kernel-style whiteout: %w", err) + } if len(flags) < unix.Getpagesize() { err := unix.Mount("overlay", mergedDir, "overlay", 0, flags) if err == nil { - logrus.Debugf("overlay test mount with multiple lowers succeeded") + logrus.Debugf("overlay: test mount with multiple lowers succeeded") return supportsDType, nil } - logrus.Debugf("overlay test mount with multiple lowers failed %v", err) + logrus.Debugf("overlay: test mount with multiple lowers failed: %v", err) } flags = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lower1Dir, upperDir, workDir) if selinux.GetEnabled() { @@ -629,17 +728,17 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI if len(flags) < unix.Getpagesize() { err := unix.Mount("overlay", mergedDir, "overlay", 0, flags) if err == nil { - logrus.StandardLogger().Logf(logLevel, "overlay test mount with multiple lowers failed, but succeeded with a single lower") - return supportsDType, errors.Wrap(graphdriver.ErrNotSupported, "kernel too old to provide multiple lowers feature for overlay") + logrus.StandardLogger().Logf(logLevel, "overlay: test mount with multiple lowers failed, but succeeded with a single lower") + return supportsDType, fmt.Errorf("kernel too old to provide multiple lowers feature for overlay: %w", graphdriver.ErrNotSupported) } - logrus.Debugf("overlay test mount with a single lower failed %v", err) + logrus.Debugf("overlay: test mount with a single lower failed: %v", err) } logrus.StandardLogger().Logf(logLevel, "'overlay' is not supported over %s at %q", backingFs, home) - return supportsDType, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s at %q", backingFs, home) + return supportsDType, fmt.Errorf("'overlay' is not supported over %s at %q: %w", backingFs, home, graphdriver.ErrIncompatibleFS) } logrus.StandardLogger().Logf(logLevel, "'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") - return supportsDType, errors.Wrap(graphdriver.ErrNotSupported, "'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") + return supportsDType, fmt.Errorf("'overlay' not found as a supported filesystem on this host. 
Please ensure kernel is new enough and has overlay support loaded.: %w", graphdriver.ErrNotSupported) } func (d *Driver) useNaiveDiff() bool { @@ -652,9 +751,9 @@ func (d *Driver) useNaiveDiff() bool { nativeDiffCacheResult, nativeDiffCacheText, err := cachedFeatureCheck(d.runhome, feature) if err == nil { if nativeDiffCacheResult { - logrus.Debugf("cached value indicated that native-diff is usable") + logrus.Debugf("Cached value indicated that native-diff is usable") } else { - logrus.Debugf("cached value indicated that native-diff is not being used") + logrus.Debugf("Cached value indicated that native-diff is not being used") logrus.Info(nativeDiffCacheText) } useNaiveDiffOnly = !nativeDiffCacheResult @@ -784,7 +883,14 @@ func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts opts.StorageOpt["size"] = strconv.FormatUint(d.options.quota.Size, 10) } - return d.create(id, parent, opts) + if _, ok := opts.StorageOpt["inodes"]; !ok { + if opts.StorageOpt == nil { + opts.StorageOpt = map[string]string{} + } + opts.StorageOpt["inodes"] = strconv.FormatUint(d.options.quota.Inodes, 10) + } + + return d.create(id, parent, opts, false) } // Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. @@ -794,12 +900,16 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr if _, ok := opts.StorageOpt["size"]; ok { return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers") } + + if _, ok := opts.StorageOpt["inodes"]; ok { + return fmt.Errorf("--storage-opt inodes is only supported for ReadWrite Layers") + } } - return d.create(id, parent, opts) + return d.create(id, parent, opts, true) } -func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { +func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disableQuota bool) (retErr error) { dir := d.dir(id) uidMaps := d.uidMaps @@ -810,15 +920,22 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr gidMaps = opts.IDMappings.GIDs() } + // Make the link directory if it does not exist + if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0755, 0, 0); err != nil { + return err + } + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) if err != nil { return err } - // Make the link directory if it does not exist - if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0700, rootUID, rootGID); err != nil { - return err + + idPair := idtools.IDPair{ + UID: rootUID, + GID: rootGID, } - if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil { + + if err := idtools.MkdirAllAndChownNew(path.Dir(dir), 0755, idPair); err != nil { return err } if parent != "" { @@ -829,18 +946,30 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr rootUID = int(st.UID()) rootGID = int(st.GID()) } - if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil { + + if _, err := system.Lstat(dir); err == nil { + logrus.Warnf("Trying to create a layer %#v while directory %q already exists; removing it first", id, dir) + // Don’t just os.RemoveAll(dir) here; d.Remove also removes the link in linkDir, + // so that we can’t end up with two symlinks in linkDir pointing to the same layer. 
+ if err := d.Remove(id); err != nil { + return fmt.Errorf("removing a pre-existing layer directory %q: %w", dir, err) + } + } + + if err := idtools.MkdirAllAndChownNew(dir, 0700, idPair); err != nil { return err } defer func() { // Clean up on failure if retErr != nil { - os.RemoveAll(dir) + if err2 := os.RemoveAll(dir); err2 != nil { + logrus.Errorf("While recovering from a failure creating a layer, error deleting %#v: %v", dir, err2) + } } }() - if d.quotaCtl != nil { + if d.quotaCtl != nil && !disableQuota { quota := quota.Quota{} if opts != nil && len(opts.StorageOpt) > 0 { driver := &Driver{} @@ -850,7 +979,9 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr if driver.options.quota.Size > 0 { quota.Size = driver.options.quota.Size } - + if driver.options.quota.Inodes > 0 { + quota.Inodes = driver.options.quota.Inodes + } } // Set container disk quota limit // If it is set to 0, we will track the disk usage, but not enforce a limit @@ -881,7 +1012,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr } // Write link id to link file - if err := ioutil.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil { + if err := os.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil { return err } @@ -902,7 +1033,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr return err } if lower != "" { - if err := ioutil.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil { + if err := os.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil { return err } } @@ -922,8 +1053,14 @@ func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) e return err } driver.options.quota.Size = uint64(size) + case "inodes": + inodes, err := strconv.ParseUint(val, 10, 64) + if err != nil { + return err + } + driver.options.quota.Inodes = uint64(inodes) default: - return fmt.Errorf("Unknown option %s", key) + return fmt.Errorf("unknown option %s", key) } } @@ -939,23 +1076,23 @@ func (d *Driver) getLower(parent string) (string, error) { } // Read Parent link fileA - parentLink, err := ioutil.ReadFile(path.Join(parentDir, "link")) + parentLink, err := os.ReadFile(path.Join(parentDir, "link")) if err != nil { if !os.IsNotExist(err) { return "", err } logrus.Warnf("Can't read parent link %q because it does not exist. Going through storage to recreate the missing links.", path.Join(parentDir, "link")) if err := d.recreateSymlinks(); err != nil { - return "", errors.Wrap(err, "error recreating the links") + return "", fmt.Errorf("recreating the links: %w", err) } - parentLink, err = ioutil.ReadFile(path.Join(parentDir, "link")) + parentLink, err = os.ReadFile(path.Join(parentDir, "link")) if err != nil { return "", err } } lowers := []string{path.Join(linkDir, string(parentLink))} - parentLower, err := ioutil.ReadFile(path.Join(parentDir, lowerFile)) + parentLower, err := os.ReadFile(path.Join(parentDir, lowerFile)) if err == nil { parentLowers := strings.Split(string(parentLower), ":") lowers = append(lowers, parentLowers...) 
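For orientation, an illustrative sketch rather than patch code: the layout getLower maintains stores each layer's lower chain in a "lower" file as colon-separated short paths such as "l/<26-char-id>", each a symlink that resolves, relative to the link directory, to some layer's "diff" directory:

package overlaynotes

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// readLowerChain resolves a layer's recorded lowers to diff directories.
// home is the overlay driver home (e.g. .../storage/overlay).
func readLowerChain(home, id string) ([]string, error) {
	data, err := os.ReadFile(filepath.Join(home, id, "lower"))
	if err != nil {
		if os.IsNotExist(err) {
			return nil, nil // base layer: nothing below it
		}
		return nil, err
	}
	var diffs []string
	for _, short := range strings.Split(strings.TrimSpace(string(data)), ":") {
		target, err := os.Readlink(filepath.Join(home, short))
		if err != nil {
			return nil, fmt.Errorf("resolving short lower link %q: %w", short, err)
		}
		// target is relative to the link directory ("../<layer>/diff"),
		// so anchor it there before recording the absolute path.
		diffs = append(diffs, filepath.Join(home, "l", target))
	}
	return diffs, nil
}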
@@ -964,22 +1101,27 @@ func (d *Driver) getLower(parent string) (string, error) { } func (d *Driver) dir(id string) string { + p, _ := d.dir2(id) + return p +} + +func (d *Driver) dir2(id string) (string, bool) { newpath := path.Join(d.home, id) if _, err := os.Stat(newpath); err != nil { for _, p := range d.AdditionalImageStores() { l := path.Join(p, d.name, id) _, err = os.Stat(l) if err == nil { - return l + return l, true } } } - return newpath + return newpath, false } func (d *Driver) getLowerDirs(id string) ([]string, error) { var lowersArray []string - lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile)) + lowers, err := os.ReadFile(path.Join(d.dir(id), lowerFile)) if err == nil { for _, s := range strings.Split(string(lowers), ":") { lower := d.dir(s) @@ -990,7 +1132,7 @@ func (d *Driver) getLowerDirs(id string) ([]string, error) { if os.IsNotExist(err) { logrus.Warnf("Can't read link %q because it does not exist. A storage corruption might have occurred, attempting to recreate the missing symlinks. It might be best wipe the storage to avoid further errors due to storage corruption.", lower) if err := d.recreateSymlinks(); err != nil { - return nil, fmt.Errorf("error recreating the missing symlinks: %v", err) + return nil, fmt.Errorf("recreating the missing symlinks: %w", err) } // let's call Readlink on lower again now that we have recreated the missing symlinks lp, err = os.Readlink(lower) @@ -1047,11 +1189,8 @@ func (d *Driver) optsAppendMappings(opts string, uidMaps, gidMaps []idtools.IDMa // Remove cleans the directories that are created for this id. func (d *Driver) Remove(id string) error { - d.locker.Lock(id) - defer d.locker.Unlock(id) - dir := d.dir(id) - lid, err := ioutil.ReadFile(path.Join(dir, "link")) + lid, err := os.ReadFile(path.Join(dir, "link")) if err == nil { if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil { logrus.Debugf("Failed to remove link: %v", err) @@ -1063,6 +1202,9 @@ func (d *Driver) Remove(id string) error { if err := system.EnsureRemoveAll(dir); err != nil && !os.IsNotExist(err) { return err } + if d.quotaCtl != nil { + d.quotaCtl.ClearQuota(dir) + } return nil } @@ -1070,23 +1212,22 @@ func (d *Driver) Remove(id string) error { // under each layer has a symlink created for it under the linkDir. If the symlink does not // exist, it creates them func (d *Driver) recreateSymlinks() error { + // We have at most 3 corrective actions per layer, so 10 iterations is plenty. + const maxIterations = 10 + // List all the directories under the home directory - dirs, err := ioutil.ReadDir(d.home) + dirs, err := os.ReadDir(d.home) if err != nil { - return fmt.Errorf("error reading driver home directory %q: %v", d.home, err) + return fmt.Errorf("reading driver home directory %q: %w", d.home, err) } - linksDir := filepath.Join(d.home, "l") // This makes the link directory if it doesn't exist - rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) - if err != nil { - return err - } - if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0755, 0, 0); err != nil { return err } // Keep looping as long as we take some corrective action in each iteration var errs *multierror.Error madeProgress := true + iterations := 0 for madeProgress { errs = nil madeProgress = false @@ -1094,13 +1235,13 @@ func (d *Driver) recreateSymlinks() error { // the layer's "link" file that points to the layer's "diff" directory. 
for _, dir := range dirs { // Skip over the linkDir and anything that is not a directory - if dir.Name() == linkDir || !dir.Mode().IsDir() { + if dir.Name() == linkDir || !dir.IsDir() { continue } // Read the "link" file under each layer to get the name of the symlink - data, err := ioutil.ReadFile(path.Join(d.dir(dir.Name()), "link")) + data, err := os.ReadFile(path.Join(d.dir(dir.Name()), "link")) if err != nil { - errs = multierror.Append(errs, errors.Wrapf(err, "error reading name of symlink for %q", dir)) + errs = multierror.Append(errs, fmt.Errorf("reading name of symlink for %q: %w", dir.Name(), err)) continue } linkPath := path.Join(d.home, linkDir, strings.Trim(string(data), "\n")) @@ -1114,30 +1255,38 @@ func (d *Driver) recreateSymlinks() error { } madeProgress = true } else if err != nil { - errs = multierror.Append(errs, errors.Wrapf(err, "error trying to stat %q", linkPath)) + errs = multierror.Append(errs, err) continue } } + + // linkDirFullPath is the full path to the linkDir + linkDirFullPath := filepath.Join(d.home, "l") // Now check if we somehow lost a "link" file, by making sure // that each symlink we have corresponds to one. - links, err := ioutil.ReadDir(linksDir) + links, err := os.ReadDir(linkDirFullPath) if err != nil { - errs = multierror.Append(errs, errors.Wrapf(err, "error reading links directory %q", linksDir)) + errs = multierror.Append(errs, err) continue } // Go through all of the symlinks in the "l" directory for _, link := range links { // Read the symlink's target, which should be "../$layer/diff" - target, err := os.Readlink(filepath.Join(linksDir, link.Name())) + target, err := os.Readlink(filepath.Join(linkDirFullPath, link.Name())) if err != nil { - errs = multierror.Append(errs, errors.Wrapf(err, "error reading target of link %q", link)) + errs = multierror.Append(errs, err) continue } targetComponents := strings.Split(target, string(os.PathSeparator)) if len(targetComponents) != 3 || targetComponents[0] != ".." || targetComponents[2] != "diff" { - errs = multierror.Append(errs, errors.Errorf("link target of %q looks weird: %q", link, target)) + errs = multierror.Append(errs, fmt.Errorf("link target of %q looks weird: %q", link, target)) // force the link to be recreated on the next pass - os.Remove(filepath.Join(linksDir, link.Name())) + if err := os.Remove(filepath.Join(linkDirFullPath, link.Name())); err != nil { + if !os.IsNotExist(err) { + errs = multierror.Append(errs, fmt.Errorf("removing link %q: %w", link, err)) + } // else don’t report any error, but also don’t set madeProgress. + continue + } madeProgress = true continue } @@ -1145,15 +1294,22 @@ func (d *Driver) recreateSymlinks() error { // it has the basename of our symlink in it. targetID := targetComponents[1] linkFile := filepath.Join(d.dir(targetID), "link") - data, err := ioutil.ReadFile(linkFile) + data, err := os.ReadFile(linkFile) if err != nil || string(data) != link.Name() { - if err := ioutil.WriteFile(linkFile, []byte(link.Name()), 0644); err != nil { - errs = multierror.Append(errs, errors.Wrapf(err, "error correcting link for layer %q", targetID)) + // NOTE: If two or more links point to the same target, we will update linkFile + // with every value of link.Name(), and set madeProgress = true every time. 
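recreateSymlinks reconciles two records of the same fact: <layer>/link names the short symlink, and l/<short> must point back at ../<layer>/diff. A compact sketch of re-establishing one such pair, with illustrative names, not patch code:

package overlaynotes

import (
	"os"
	"path/filepath"
)

// relink re-creates both halves of the bookkeeping that recreateSymlinks
// repairs: the symlink under the link dir and the layer's "link" file.
func relink(home, id, short string) error {
	linkPath := filepath.Join(home, "l", short)
	if err := os.Remove(linkPath); err != nil && !os.IsNotExist(err) {
		return err
	}
	if err := os.Symlink(filepath.Join("..", id, "diff"), linkPath); err != nil {
		return err
	}
	// Record the short name so the layer and the symlink agree again.
	return os.WriteFile(filepath.Join(home, id, "link"), []byte(short), 0644)
}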
+ if err := os.WriteFile(linkFile, []byte(link.Name()), 0644); err != nil { + errs = multierror.Append(errs, fmt.Errorf("correcting link for layer %s: %w", targetID, err)) continue } madeProgress = true } } + iterations++ + if iterations >= maxIterations { + errs = multierror.Append(errs, fmt.Errorf("reached %d iterations in overlay graph driver’s recreateSymlink, giving up", iterations)) + break + } } if errs != nil { return errs.ErrorOrNil() @@ -1162,23 +1318,25 @@ func (d *Driver) recreateSymlinks() error { } // Get creates and mounts the required file system for the given id and returns the mount path. -func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) { +func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { return d.get(id, false, options) } func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts) (_ string, retErr error) { - d.locker.Lock(id) - defer d.locker.Unlock(id) - dir := d.dir(id) + dir, inAdditionalStore := d.dir2(id) if _, err := os.Stat(dir); err != nil { return "", err } - readWrite := true + readWrite := !inAdditionalStore if !d.SupportsShifting() || options.DisableShifting { disableShifting = true } + logLevel := logrus.WarnLevel + if unshare.IsRootless() { + logLevel = logrus.DebugLevel + } optsList := options.Options if len(optsList) == 0 { optsList = strings.Split(d.options.mountOptions, ",") @@ -1187,12 +1345,27 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO // options otherwise the kernel refuses to follow the metacopy xattr. if hasMetacopyOption(strings.Split(d.options.mountOptions, ",")) && !hasMetacopyOption(options.Options) { if d.usingMetacopy { + logrus.StandardLogger().Logf(logrus.DebugLevel, "Adding metacopy option, configured globally") optsList = append(optsList, "metacopy=on") + } + } + } + if !d.usingMetacopy { + if hasMetacopyOption(optsList) { + if d.options.mountProgram == "" { + release := "" + var uts unix.Utsname + if err := unix.Uname(&uts); err == nil { + release = " " + string(uts.Release[:]) + " " + string(uts.Version[:]) + } + logrus.StandardLogger().Logf(logLevel, "Ignoring global metacopy option, not supported with booted kernel"+release) } else { - logrus.Warnf("ignoring metacopy option from storage.conf, not supported with booted kernel") + logrus.Debugf("Ignoring global metacopy option, the mount program doesn't support it") } } + optsList = stripOption(optsList, "metacopy=on") } + for _, o := range optsList { if o == "ro" { readWrite = false @@ -1200,7 +1373,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO } } - lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile)) + lowers, err := os.ReadFile(path.Join(dir, lowerFile)) if err != nil && !os.IsNotExist(err) { return "", err } @@ -1209,27 +1382,9 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO return "", errors.New("max depth exceeded") } - // absLowers is the list of lowers as absolute paths, which works well with additional stores. + // absLowers is the list of lowers as absolute paths. absLowers := []string{} - // relLowers is the list of lowers as paths relative to the driver's home directory. - relLowers := []string{} - // Check if $link/../diff{1-*} exist. If they do, add them, in order, as the front of the lowers - // lists that we're building. "diff" itself is the upper, so it won't be in the lists. 
- link, err := ioutil.ReadFile(path.Join(dir, "link")) - if err != nil { - if !os.IsNotExist(err) { - return "", err - } - logrus.Warnf("Can't read parent link %q because it does not exist. Going through storage to recreate the missing links.", path.Join(dir, "link")) - if err := d.recreateSymlinks(); err != nil { - return "", errors.Wrap(err, "error recreating the links") - } - link, err = ioutil.ReadFile(path.Join(dir, "link")) - if err != nil { - return "", err - } - } diffN := 1 perms := defaultPerms if d.options.forceMask != nil { @@ -1243,7 +1398,6 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO } for err == nil { absLowers = append(absLowers, filepath.Join(dir, nameWithSuffix("diff", diffN))) - relLowers = append(relLowers, dumbJoin(string(link), "..", nameWithSuffix("diff", diffN))) diffN++ st, err = os.Stat(filepath.Join(dir, nameWithSuffix("diff", diffN))) if err == nil && !permsKnown { @@ -1268,6 +1422,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO perms = os.FileMode(st2.Mode()) permsKnown = true } + l = lower break } lower = "" @@ -1278,11 +1433,11 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO if lower == "" && os.IsNotExist(err) { logrus.Warnf("Can't stat lower layer %q because it does not exist. Going through storage to recreate the missing symlinks.", newpath) if err := d.recreateSymlinks(); err != nil { - return "", fmt.Errorf("error recreating the missing symlinks: %v", err) + return "", fmt.Errorf("recreating the missing symlinks: %w", err) } lower = newpath } else if lower == "" { - return "", fmt.Errorf("Can't stat lower layer %q: %v", newpath, err) + return "", fmt.Errorf("can't stat lower layer %q: %w", newpath, err) } } else { if !permsKnown { @@ -1292,12 +1447,10 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO lower = newpath } absLowers = append(absLowers, lower) - relLowers = append(relLowers, l) diffN = 1 _, err = os.Stat(dumbJoin(lower, "..", nameWithSuffix("diff", diffN))) for err == nil { absLowers = append(absLowers, dumbJoin(lower, "..", nameWithSuffix("diff", diffN))) - relLowers = append(relLowers, dumbJoin(l, "..", nameWithSuffix("diff", diffN))) diffN++ _, err = os.Stat(dumbJoin(lower, "..", nameWithSuffix("diff", diffN))) } @@ -1305,7 +1458,6 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO if len(absLowers) == 0 { absLowers = append(absLowers, path.Join(dir, "empty")) - relLowers = append(relLowers, path.Join(id, "empty")) } // user namespace requires this to move a directory from lower to upper. 
rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) @@ -1329,7 +1481,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO if retErr != nil { if c := d.ctr.Decrement(mergedDir); c <= 0 { if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil { - logrus.Errorf("error unmounting %v: %v", mergedDir, mntErr) + logrus.Errorf("Unmounting %v: %v", mergedDir, mntErr) } } } @@ -1337,47 +1489,93 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO workdir := path.Join(dir, "work") - var opts string - if readWrite { - opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workdir) - } else { - opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, strings.Join(absLowers, ":")) - } - if len(optsList) > 0 { - opts = fmt.Sprintf("%s,%s", strings.Join(optsList, ","), opts) - } - if d.options.mountProgram == "" && unshare.IsRootless() { - opts = fmt.Sprintf("%s,userxattr", opts) + optsList = append(optsList, "userxattr") } - // If "volatile" is not supported by the file system, just ignore the request - if options.Volatile && !hasVolatileOption(strings.Split(opts, ",")) { + if options.Volatile && !hasVolatileOption(optsList) { supported, err := d.getSupportsVolatile() if err != nil { return "", err } + // If "volatile" is not supported by the file system, just ignore the request if supported { - opts = fmt.Sprintf("%s,volatile", opts) + optsList = append(optsList, "volatile") } } + if d.supportsIDmappedMounts() && len(options.UidMaps) > 0 && len(options.GidMaps) > 0 { + var newAbsDir []string + mappedRoot := filepath.Join(d.home, id, "mapped") + if err := os.MkdirAll(mappedRoot, 0700); err != nil { + return "", err + } + + pid, cleanupFunc, err := createUsernsProcess(options.UidMaps, options.GidMaps) + if err != nil { + return "", err + } + defer cleanupFunc() + + idMappedMounts := make(map[string]string) + + // rewrite the lower dirs to their idmapped mount. + c := 0 + for _, absLower := range absLowers { + mappedMountSrc := getMappedMountRoot(absLower) + + root, found := idMappedMounts[mappedMountSrc] + if !found { + root = filepath.Join(mappedRoot, fmt.Sprintf("%d", c)) + c++ + if err := createIDMappedMount(mappedMountSrc, root, int(pid)); err != nil { + return "", fmt.Errorf("create mapped mount for %q on %q: %w", mappedMountSrc, root, err) + } + idMappedMounts[mappedMountSrc] = root + + // overlay takes a reference on the mount, so it is safe to unmount + // the mapped idmounts as soon as the final overlay file system is mounted. + defer unix.Unmount(root, unix.MNT_DETACH) + } + + // relative path to the layer through the id mapped mount + rel, err := filepath.Rel(mappedMountSrc, absLower) + if err != nil { + return "", err + } + + newAbsDir = append(newAbsDir, filepath.Join(root, rel)) + } + absLowers = newAbsDir + } + + var opts string + if readWrite { + opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workdir) + } else { + opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, strings.Join(absLowers, ":")) + } + if len(optsList) > 0 { + opts = fmt.Sprintf("%s,%s", opts, strings.Join(optsList, ",")) + } + mountData := label.FormatMountLabel(opts, options.MountLabel) mountFunc := unix.Mount mountTarget := mergedDir pageSize := unix.Getpagesize() - // Use relative paths and mountFrom when the mount data has exceeded - // the page size. 
The mount syscall fails if the mount data cannot - // fit within a page and relative links make the mount data much - // smaller at the expense of requiring a fork exec to chroot. if d.options.mountProgram != "" { mountFunc = func(source string, target string, mType string, flags uintptr, label string) error { if !disableShifting { label = d.optsAppendMappings(label, options.UidMaps, options.GidMaps) } + // if forceMask is in place, tell fuse-overlayfs to write the permissions mask to an unprivileged xattr as well. + if d.options.forceMask != nil { + label = label + ",xattr_permissions=2" + } + mountProgram := exec.Command(d.options.mountProgram, "-o", label, target) mountProgram.Dir = d.home var b bytes.Buffer @@ -1388,25 +1586,28 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO if output == "" { output = "" } - return errors.Wrapf(err, "using mount program %s: %s", d.options.mountProgram, output) + return fmt.Errorf("using mount program %s: %s: %w", d.options.mountProgram, output, err) } return nil } - } else if len(mountData) > pageSize { + } else if len(mountData) >= pageSize { + // Use mountFrom when the mount data has exceeded the page size. The mount syscall fails if + // the mount data cannot fit within a page and relative links make the mount data much + // smaller at the expense of requiring a fork exec to chdir(). + workdir = path.Join(id, "work") - //FIXME: We need to figure out to get this to work with additional stores if readWrite { diffDir := path.Join(id, "diff") - opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(relLowers, ":"), diffDir, workdir) + opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workdir) } else { - opts = fmt.Sprintf("lowerdir=%s", strings.Join(absLowers, ":")) + opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, strings.Join(absLowers, ":")) } - mountData = label.FormatMountLabel(opts, options.MountLabel) - if len(mountData) > pageSize { - return "", fmt.Errorf("cannot mount layer, mount label %q too large %d > page size %d", options.MountLabel, len(mountData), pageSize) + if len(optsList) > 0 { + opts = fmt.Sprintf("%s,%s", opts, strings.Join(optsList, ",")) } + mountData = label.FormatMountLabel(opts, options.MountLabel) mountFunc = func(source string, target string, mType string, flags uintptr, label string) error { - return mountFrom(d.home, source, target, mType, flags, label) + return mountOverlayFrom(d.home, source, target, mType, flags, label) } mountTarget = path.Join(id, "merged") } @@ -1421,7 +1622,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO flags, data := mount.ParseOptions(mountData) logrus.Debugf("overlay: mount_data=%s", mountData) if err := mountFunc("overlay", mountTarget, "overlay", uintptr(flags), data); err != nil { - return "", fmt.Errorf("error creating overlay mount to %s, mount_data=%q: %v", mountTarget, mountData, err) + return "", fmt.Errorf("creating overlay mount to %s, mount_data=%q: %w", mountTarget, mountData, err) } return mergedDir, nil @@ -1429,8 +1630,6 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO // Put unmounts the mount path created for the give id. 
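The len(mountData) >= pageSize branch above exists because the kernel copies mount data into a single page and rejects anything longer. A sketch of the two ingredients mountOverlayFrom relies on, with illustrative names; the fd-number lowerdirs only resolve if the mount helper first changes directory to /proc/self/fd, as mountOverlayFromMain does:

package overlaynotes

import (
	"fmt"
	"strings"

	"golang.org/x/sys/unix"
)

// mountDataFits mirrors the size test in get(): mount data must fit in a page.
func mountDataFits(data string) bool {
	return len(data) < unix.Getpagesize()
}

// shortenLowers trades path length for open descriptors: each lowerdir is
// replaced by its fd number, valid only with /proc/self/fd as the cwd.
// The descriptors intentionally stay open until after the overlay mount.
func shortenLowers(lowers []string) (string, error) {
	fds := make([]string, 0, len(lowers))
	for _, l := range lowers {
		fd, err := unix.Open(l, unix.O_RDONLY, 0)
		if err != nil {
			return "", err
		}
		fds = append(fds, fmt.Sprintf("%d", fd))
	}
	return strings.Join(fds, ":"), nil
}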
func (d *Driver) Put(id string) error { - d.locker.Lock(id) - defer d.locker.Unlock(id) dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return err @@ -1439,18 +1638,30 @@ func (d *Driver) Put(id string) error { if count := d.ctr.Decrement(mountpoint); count > 0 { return nil } - if _, err := ioutil.ReadFile(path.Join(dir, lowerFile)); err != nil && !os.IsNotExist(err) { + if _, err := os.ReadFile(path.Join(dir, lowerFile)); err != nil && !os.IsNotExist(err) { return err } unmounted := false + mappedRoot := filepath.Join(d.home, id, "mapped") + // It should not happen, but cleanup any mapped mount if it was leaked. + if _, err := os.Stat(mappedRoot); err == nil { + mounts, err := os.ReadDir(mappedRoot) + if err == nil { + // Go through all of the mapped mounts. + for _, m := range mounts { + _ = unix.Unmount(filepath.Join(mappedRoot, m.Name()), unix.MNT_DETACH) + } + } + } + if d.options.mountProgram != "" { // Attempt to unmount the FUSE mount using either fusermount or fusermount3. // If they fail, fallback to unix.Unmount for _, v := range []string{"fusermount3", "fusermount"} { err := exec.Command(v, "-u", mountpoint).Run() - if err != nil && errors.Cause(err) != exec.ErrNotFound { + if err != nil && !errors.Is(err, exec.ErrNotFound) { logrus.Debugf("Error unmounting %s with %s - %v", mountpoint, v, err) } if err == nil { @@ -1490,6 +1701,40 @@ func (d *Driver) Exists(id string) bool { return err == nil } +func nameLooksLikeID(name string) bool { + if len(name) != 64 { + return false + } + for _, c := range name { + if !unicode.Is(unicode.ASCII_Hex_Digit, c) { + return false + } + } + return true +} + +// List layers (not including additional image stores) +func (d *Driver) ListLayers() ([]string, error) { + entries, err := os.ReadDir(d.home) + if err != nil { + return nil, err + } + + layers := make([]string, 0) + + for _, entry := range entries { + id := entry.Name() + // Does it look like a datadir directory? + if !entry.IsDir() || !nameLooksLikeID(id) { + continue + } + + layers = append(layers, id) + } + + return layers, err +} + // isParent returns if the passed in parent is the direct parent of the passed in layer func (d *Driver) isParent(id, parent string) bool { lowers, err := d.getLowerDirs(id) @@ -1522,11 +1767,24 @@ func (d *Driver) getWhiteoutFormat() archive.WhiteoutFormat { return whiteoutFormat } -type fileGetNilCloser struct { - storage.FileGetter +type overlayFileGetter struct { + diffDirs []string } -func (f fileGetNilCloser) Close() error { +func (g *overlayFileGetter) Get(path string) (io.ReadCloser, error) { + for _, d := range g.diffDirs { + f, err := os.Open(filepath.Join(d, path)) + if err == nil { + return f, nil + } + } + if len(g.diffDirs) > 0 { + return os.Open(filepath.Join(g.diffDirs[0], path)) + } + return nil, fmt.Errorf("%s: %w", path, os.ErrNotExist) +} + +func (g *overlayFileGetter) Close() error { return nil } @@ -1535,13 +1793,18 @@ func (d *Driver) getStagingDir() string { } // DiffGetter returns a FileGetCloser that can read files from the directory that -// contains files for the layer differences. Used for direct access for tar-split. +// contains files for the layer differences, either for this layer, or one of our +// lowers if we're just a template directory. Used for direct access for tar-split. 
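Put's FUSE teardown above tries the fusermount binaries before falling back to a plain unmount, since a fuse-overlayfs mount created by an unprivileged helper may not be removable via umount2 directly. A condensed sketch of that fallback, with unmountFuse as an illustrative name:

package overlaynotes

import (
	"errors"
	"os/exec"

	"golang.org/x/sys/unix"
)

// unmountFuse prefers the setuid fusermount helpers and falls back to a
// lazy kernel unmount when neither binary is installed or both fail.
func unmountFuse(mountpoint string) error {
	for _, tool := range []string{"fusermount3", "fusermount"} {
		err := exec.Command(tool, "-u", mountpoint).Run()
		if err == nil {
			return nil
		}
		if !errors.Is(err, exec.ErrNotFound) {
			break // the helper exists but refused; try the syscall instead
		}
	}
	return unix.Unmount(mountpoint, unix.MNT_DETACH)
}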
func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { p, err := d.getDiffPath(id) if err != nil { return nil, err } - return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil + paths, err := d.getLowerDiffPaths(id) + if err != nil { + return nil, err + } + return &overlayFileGetter{diffDirs: append([]string{p}, paths...)}, nil } // CleanupStagingDirectory cleanups the staging directory. @@ -1566,7 +1829,7 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App if err != nil && !os.IsExist(err) { return graphdriver.DriverWithDifferOutput{}, err } - applyDir, err = ioutil.TempDir(d.getStagingDir(), "") + applyDir, err = os.MkdirTemp(d.getStagingDir(), "") if err != nil { return graphdriver.DriverWithDifferOutput{}, err } @@ -1586,7 +1849,7 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App GIDMaps: idMappings.GIDs(), IgnoreChownErrors: d.options.ignoreChownErrors, WhiteoutFormat: d.getWhiteoutFormat(), - InUserNS: rsystem.RunningInUserNS(), + InUserNS: unshare.IsRootless(), }) out.Target = applyDir return out, err @@ -1644,7 +1907,7 @@ func (d *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts) IgnoreChownErrors: d.options.ignoreChownErrors, ForceMask: d.options.forceMask, WhiteoutFormat: d.getWhiteoutFormat(), - InUserNS: rsystem.RunningInUserNS(), + InUserNS: unshare.IsRootless(), }); err != nil { return 0, err } @@ -1768,7 +2031,7 @@ func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMapp err = graphdriver.ChownPathByMaps(layerFs, toContainer, toHost) if err != nil { if err2 := d.Put(id); err2 != nil { - logrus.Errorf("%v; error unmounting %v: %v", err, id, err2) + logrus.Errorf("%v; unmounting %v: %v", err, id, err2) } return err } @@ -1816,12 +2079,31 @@ func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMapp return nil } +// supportsIDmappedMounts returns whether the kernel supports using idmapped mounts with +// overlay lower layers. 
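
// A minimal standalone sketch of the io/ioutil-to-os migration applied
// throughout this bump (Go 1.16+): ioutil.TempDir, ReadFile, WriteFile and
// ReadDir all have direct os replacements. Only the file names here are
// hypothetical.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	staging, err := os.MkdirTemp("", "staging-") // was ioutil.TempDir
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(staging)

	layerID := filepath.Join(staging, "layerID")
	if err := os.WriteFile(layerID, []byte("example"), 0o600); err != nil { // was ioutil.WriteFile
		panic(err)
	}
	content, err := os.ReadFile(layerID) // was ioutil.ReadFile
	if err != nil {
		panic(err)
	}
	entries, err := os.ReadDir(staging) // was ioutil.ReadDir; yields fs.DirEntry values
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s, %d entries\n", content, len(entries))
}
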
+func (d *Driver) supportsIDmappedMounts() bool { + if d.supportsIDMappedMounts != nil { + return *d.supportsIDMappedMounts + } + + supportsIDMappedMounts, err := checkAndRecordIDMappedSupport(d.home, d.runhome) + d.supportsIDMappedMounts = &supportsIDMappedMounts + if err == nil { + return supportsIDMappedMounts + } + logrus.Debugf("Check for idmapped mounts support %v", err) + return false +} + // SupportsShifting tells whether the driver support shifting of the UIDs/GIDs in an userNS func (d *Driver) SupportsShifting() bool { if os.Getenv("_TEST_FORCE_SUPPORT_SHIFTING") == "yes-please" { return true } - return d.options.mountProgram != "" + if d.options.mountProgram != "" { + return true + } + return d.supportsIDmappedMounts() } // dumbJoin is more or less a dumber version of filepath.Join, but one which @@ -1856,22 +2138,21 @@ func (d *Driver) getAdditionalLayerPath(dgst digest.Digest, ref string) (string, filepath.Join(target, "blob"), } { if _, err := os.Stat(p); err != nil { - return "", errors.Wrapf(graphdriver.ErrLayerUnknown, - "failed to stat additional layer %q: %v", p, err) + wrapped := fmt.Errorf("failed to stat additional layer %q: %w", p, err) + return "", fmt.Errorf("%v: %w", wrapped, graphdriver.ErrLayerUnknown) } } return target, nil } - return "", errors.Wrapf(graphdriver.ErrLayerUnknown, - "additional layer (%q, %q) not found", dgst, ref) + return "", fmt.Errorf("additional layer (%q, %q) not found: %w", dgst, ref, graphdriver.ErrLayerUnknown) } func (d *Driver) releaseAdditionalLayerByID(id string) { if al, err := d.getAdditionalLayerPathByID(id); err == nil { notifyReleaseAdditionalLayer(al) } else if !os.IsNotExist(err) { - logrus.Warnf("unexpected error on reading Additional Layer Store pointer %v", err) + logrus.Warnf("Unexpected error on reading Additional Layer Store pointer %v", err) } } @@ -1889,7 +2170,7 @@ func (al *additionalLayer) Info() (io.ReadCloser, error) { return os.Open(filepath.Join(al.path, "info")) } -// Blob returns a reader of the raw contents of this leyer. +// Blob returns a reader of the raw contents of this layer. func (al *additionalLayer) Blob() (io.ReadCloser, error) { return os.Open(filepath.Join(al.path, "blob")) } @@ -1909,7 +2190,7 @@ func (al *additionalLayer) CreateAs(id, parent string) error { } // tell the additional layer store that we use this layer. 
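
// A minimal standalone sketch of the caching idiom in supportsIDmappedMounts
// above: a *bool distinguishes "never probed" from "probed false", so the
// kernel check runs at most once per driver. probe is a hypothetical
// stand-in for checkAndRecordIDMappedSupport.
package main

import "fmt"

type driver struct {
	idmapSupported *bool
}

func probe() (bool, error) { return false, fmt.Errorf("kernel too old") }

func (d *driver) supportsIDMapped() bool {
	if d.idmapSupported != nil {
		return *d.idmapSupported
	}
	ok, err := probe()
	d.idmapSupported = &ok // cache the answer before looking at err
	if err != nil {
		return false
	}
	return ok
}

func main() {
	d := &driver{}
	fmt.Println(d.supportsIDMapped(), d.supportsIDMapped()) // probe ran only once
}
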
// mark this layer as "additional layer" - if err := ioutil.WriteFile(path.Join(dir, "additionallayer"), []byte(al.path), 0644); err != nil { + if err := os.WriteFile(path.Join(dir, "additionallayer"), []byte(al.path), 0644); err != nil { return err } notifyUseAdditionalLayer(al.path) @@ -1917,7 +2198,7 @@ func (al *additionalLayer) CreateAs(id, parent string) error { } func (d *Driver) getAdditionalLayerPathByID(id string) (string, error) { - al, err := ioutil.ReadFile(path.Join(d.dir(id), "additionallayer")) + al, err := os.ReadFile(path.Join(d.dir(id), "additionallayer")) if err != nil { return "", err } @@ -1952,10 +2233,10 @@ func notifyUseAdditionalLayer(al string) { } else if err == nil { f.Close() if err := os.Remove(useFile); err != nil { - logrus.Warnf("failed to remove use file") + logrus.Warnf("Failed to remove use file") } } - logrus.Warnf("unexpected error by Additional Layer Store %v during use; GC doesn't seem to be supported", err) + logrus.Warnf("Unexpected error by Additional Layer Store %v during use; GC doesn't seem to be supported", err) } // notifyReleaseAdditionalLayer notifies Additional Layer Store that we don't use the specified @@ -1972,7 +2253,7 @@ func notifyReleaseAdditionalLayer(al string) { if os.IsNotExist(err) { return } - logrus.Warnf("unexpected error by Additional Layer Store %v during release; GC doesn't seem to be supported", err) + logrus.Warnf("Unexpected error by Additional Layer Store %v during release; GC doesn't seem to be supported", err) } // redirectDiffIfAdditionalLayer checks if the passed diff path is Additional Layer and @@ -1990,3 +2271,15 @@ func redirectDiffIfAdditionalLayer(diffPath string) (string, error) { } return diffPath, nil } + +// getMappedMountRoot is a heuristic that calculates the parent directory where +// the idmapped mount should be applied. +// It is useful to minimize the number of idmapped mounts and at the same time use +// a common path as long as possible to reduce the length of the mount data argument. 
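
// A minimal standalone sketch of the error-wrapping migration visible in the
// hunks above: fmt.Errorf with %w replaces errors.Wrapf from
// github.com/pkg/errors, and keeps a sentinel recoverable via errors.Is. The
// sentinel and path here are hypothetical, standing in for
// graphdriver.ErrLayerUnknown.
package main

import (
	"errors"
	"fmt"
)

var errLayerUnknown = errors.New("layer not known")

func statAdditionalLayer(p string) error {
	// Wrap with context while preserving the sentinel in the chain.
	return fmt.Errorf("failed to stat additional layer %q: %w", p, errLayerUnknown)
}

func main() {
	err := statAdditionalLayer("/var/lib/containers/example/blob")
	if errors.Is(err, errLayerUnknown) {
		fmt.Println("sentinel preserved through wrapping:", err)
	}
}
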
+func getMappedMountRoot(path string) string { + dirName := filepath.Dir(path) + if filepath.Base(dirName) == linkDir { + return filepath.Dir(dirName) + } + return dirName +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay_cgo.go b/vendor/github.com/containers/storage/drivers/overlay/overlay_cgo.go index 0b70a5d92..88bfbf9c7 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay_cgo.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay_cgo.go @@ -1,3 +1,4 @@ +//go:build linux && cgo // +build linux,cgo package overlay diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go b/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go index 1cdac7777..2a7a307a2 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go @@ -1,3 +1,4 @@ +//go:build linux && !cgo // +build linux,!cgo package overlay diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go b/vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go index 49af84a22..33b163a8c 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package overlay diff --git a/vendor/github.com/containers/storage/drivers/overlay/randomid.go b/vendor/github.com/containers/storage/drivers/overlay/randomid.go index fc565ef0b..651990089 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/randomid.go +++ b/vendor/github.com/containers/storage/drivers/overlay/randomid.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package overlay @@ -47,13 +48,13 @@ func generateID(l int) string { if retryOnError(err) && retries < maxretries { count += n retries++ - logrus.Errorf("error generating version 4 uuid, retrying: %v", err) + logrus.Errorf("Generating version 4 uuid, retrying: %v", err) continue } // Any other errors represent a system problem. What did someone // do to /dev/urandom? - panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err)) + panic(fmt.Errorf("reading random number generator, retried for %v: %w", totalBackoff.String(), err)) } break diff --git a/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go b/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go index 9fc57b36b..2670ef3df 100644 --- a/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go +++ b/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package overlayutils @@ -6,7 +7,6 @@ import ( "fmt" graphdriver "github.com/containers/storage/drivers" - "github.com/pkg/errors" ) // ErrDTypeNotSupported denotes that the backing filesystem doesn't support d_type. @@ -16,5 +16,5 @@ func ErrDTypeNotSupported(driver, backingFs string) error { msg += " Reformat the filesystem with ftype=1 to enable d_type support." } msg += " Running without d_type is not supported." 
- return errors.Wrap(graphdriver.ErrNotSupported, msg) + return fmt.Errorf("%s: %w", msg, graphdriver.ErrNotSupported) } diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota.go b/vendor/github.com/containers/storage/drivers/quota/projectquota.go index d805623b5..f5484dee7 100644 --- a/vendor/github.com/containers/storage/drivers/quota/projectquota.go +++ b/vendor/github.com/containers/storage/drivers/quota/projectquota.go @@ -1,3 +1,4 @@ +//go:build linux && !exclude_disk_quota && cgo // +build linux,!exclude_disk_quota,cgo // @@ -38,8 +39,8 @@ struct fsxattr { #ifndef PRJQUOTA #define PRJQUOTA 2 #endif -#ifndef XFS_PROJ_QUOTA -#define XFS_PROJ_QUOTA 2 +#ifndef FS_PROJ_QUOTA +#define FS_PROJ_QUOTA 2 #endif #ifndef Q_XSETPQLIM #define Q_XSETPQLIM QCMD(Q_XSETQLIM, PRJQUOTA) @@ -50,10 +51,13 @@ struct fsxattr { */ import "C" import ( + "errors" "fmt" - "io/ioutil" + "math" + "os" "path" "path/filepath" + "syscall" "unsafe" "github.com/containers/storage/pkg/directory" @@ -61,9 +65,12 @@ import ( "golang.org/x/sys/unix" ) -// Quota limit params - currently we only control blocks hard limit +const projectIDsAllocatedPerQuotaHome = 10000 + +// Quota limit params - currently we only control blocks hard limit and inodes type Quota struct { - Size uint64 + Size uint64 + Inodes uint64 } // Control - Context to be used by storage driver (e.g. overlay) @@ -72,6 +79,25 @@ type Control struct { backingFsBlockDev string nextProjectID uint32 quotas map[string]uint32 + basePath string +} + +// Attempt to generate a unigue projectid. Multiple directories +// per file system can have quota and they need a group of unique +// ids. This function attempts to allocate at least projectIDsAllocatedPerQuotaHome(10000) +// unique projectids, based on the inode of the basepath. +func generateUniqueProjectID(path string) (uint32, error) { + fileinfo, err := os.Stat(path) + if err != nil { + return 0, err + } + stat, ok := fileinfo.Sys().(*syscall.Stat_t) + if !ok { + return 0, fmt.Errorf("not a syscall.Stat_t %s", path) + + } + projectID := projectIDsAllocatedPerQuotaHome + (stat.Ino*projectIDsAllocatedPerQuotaHome)%(math.MaxUint32-projectIDsAllocatedPerQuotaHome) + return uint32(projectID), nil } // NewControl - initialize project quota support. @@ -80,22 +106,27 @@ type Control struct { // // Returns nil (and error) if project quota is not supported. // -// First get the project id of the home directory. +// First get the project id of the basePath directory. // This test will fail if the backing fs is not xfs. // // xfs_quota tool can be used to assign a project id to the driver home directory, e.g.: -// echo 999:/var/lib/containers/storage/overlay >> /etc/projects -// echo storage:999 >> /etc/projid -// xfs_quota -x -c 'project -s storage' / -// -// In that case, the home directory project id will be used as a "start offset" -// and all containers will be assigned larger project ids (e.g. >= 1000). -// This is a way to prevent xfs_quota management from conflicting with containers/storage. +// echo 100000:/var/lib/containers/storage/overlay >> /etc/projects +// echo 200000:/var/lib/containers/storage/volumes >> /etc/projects +// echo storage:100000 >> /etc/projid +// echo volumes:200000 >> /etc/projid +// xfs_quota -x -c 'project -s storage volumes' / // +// In the example above, the storage directory project id will be used as a +// "start offset" and all containers will be assigned larger project ids +// (e.g. >= 100000). 
Then the volumes directory project id will be used as a +// "start offset" and all volumes will be assigned larger project ids +// (e.g. >= 200000). +// This is a way to prevent xfs_quota management from conflicting with +// containers/storage. + // Then try to create a test directory with the next project id and set a quota // on it. If that works, continue to scan existing containers to map allocated // project ids. -// func NewControl(basePath string) (*Control, error) { // // Get project id of parent dir as minimal id to be used by driver @@ -104,8 +135,15 @@ func NewControl(basePath string) (*Control, error) { if err != nil { return nil, err } - minProjectID++ + if minProjectID == 0 { + // Indicates the storage was never initialized + // Generate a unique range of Projectids for this basepath + minProjectID, err = generateUniqueProjectID(basePath) + if err != nil { + return nil, err + } + } // // create backing filesystem device node // @@ -119,22 +157,25 @@ func NewControl(basePath string) (*Control, error) { // a quota on the first available project id // quota := Quota{ - Size: 0, - } - if err := setProjectQuota(backingFsBlockDev, minProjectID, quota); err != nil { - return nil, err + Size: 0, + Inodes: 0, } q := Control{ backingFsBlockDev: backingFsBlockDev, nextProjectID: minProjectID + 1, quotas: make(map[string]uint32), + basePath: basePath, + } + + if err := q.setProjectQuota(minProjectID, quota); err != nil { + return nil, err } // // get first project id to be used for next container // - err = q.findNextProjectID(basePath) + err = q.findNextProjectID() if err != nil { return nil, err } @@ -166,30 +207,63 @@ func (q *Control) SetQuota(targetPath string, quota Quota) error { // // set the quota limit for the container's project id // - logrus.Debugf("SetQuota(%s, %d): projectID=%d", targetPath, quota.Size, projectID) - return setProjectQuota(q.backingFsBlockDev, projectID, quota) + logrus.Debugf("SetQuota path=%s, size=%d, inodes=%d, projectID=%d", targetPath, quota.Size, quota.Inodes, projectID) + return q.setProjectQuota(projectID, quota) +} + +// ClearQuota removes the map entry in the quotas map for targetPath. +// It does so to prevent the map leaking entries as directories are deleted. 
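
// A minimal standalone sketch of the generateUniqueProjectID scheme described
// above: fold the quota home's inode into a band of roughly 10000 project
// IDs, so that two quota homes on the same filesystem get disjoint ranges.
// The path is hypothetical; this builds on Unix only (syscall.Stat_t).
package main

import (
	"fmt"
	"math"
	"os"
	"syscall"
)

const perHome = 10000

func main() {
	fi, err := os.Stat("/var/tmp")
	if err != nil {
		panic(err)
	}
	st, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		panic("not a syscall.Stat_t")
	}
	first := perHome + (st.Ino*perHome)%(math.MaxUint32-perHome)
	fmt.Printf("project IDs for this quota home start at %d\n", first)
}
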
+func (q *Control) ClearQuota(targetPath string) { + delete(q.quotas, targetPath) } // setProjectQuota - set the quota for project id on xfs block device -func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) error { +func (q *Control) setProjectQuota(projectID uint32, quota Quota) error { var d C.fs_disk_quota_t d.d_version = C.FS_DQUOT_VERSION d.d_id = C.__u32(projectID) - d.d_flags = C.XFS_PROJ_QUOTA + d.d_flags = C.FS_PROJ_QUOTA - d.d_fieldmask = C.FS_DQ_BHARD | C.FS_DQ_BSOFT - d.d_blk_hardlimit = C.__u64(quota.Size / 512) - d.d_blk_softlimit = d.d_blk_hardlimit + if quota.Size > 0 { + d.d_fieldmask = d.d_fieldmask | C.FS_DQ_BHARD | C.FS_DQ_BSOFT + d.d_blk_hardlimit = C.__u64(quota.Size / 512) + d.d_blk_softlimit = d.d_blk_hardlimit + } + if quota.Inodes > 0 { + d.d_fieldmask = d.d_fieldmask | C.FS_DQ_IHARD | C.FS_DQ_ISOFT + d.d_ino_hardlimit = C.__u64(quota.Inodes) + d.d_ino_softlimit = d.d_ino_hardlimit + } - var cs = C.CString(backingFsBlockDev) + var cs = C.CString(q.backingFsBlockDev) defer C.free(unsafe.Pointer(cs)) - _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XSETPQLIM, - uintptr(unsafe.Pointer(cs)), uintptr(d.d_id), - uintptr(unsafe.Pointer(&d)), 0, 0) - if errno != 0 { - return fmt.Errorf("Failed to set quota limit for projid %d on %s: %v", - projectID, backingFsBlockDev, errno.Error()) + runQuotactl := func() syscall.Errno { + _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XSETPQLIM, + uintptr(unsafe.Pointer(cs)), uintptr(d.d_id), + uintptr(unsafe.Pointer(&d)), 0, 0) + return errno + } + + errno := runQuotactl() + + // If the backingFsBlockDev does not exist any more then try to recreate it. + if errors.Is(errno, unix.ENOENT) { + if _, err := makeBackingFsDev(q.basePath); err != nil { + return fmt.Errorf( + "failed to recreate missing backingFsBlockDev %s for projid %d: %w", + q.backingFsBlockDev, projectID, err, + ) + } + + if errno := runQuotactl(); errno != 0 { + return fmt.Errorf("failed to set quota limit for projid %d on %s after backingFsBlockDev recreation: %w", + projectID, q.backingFsBlockDev, errno) + } + + } else if errno != 0 { + return fmt.Errorf("failed to set quota limit for projid %d on %s: %w", + projectID, q.backingFsBlockDev, errno) } return nil @@ -202,6 +276,7 @@ func (q *Control) GetQuota(targetPath string, quota *Quota) error { return err } quota.Size = uint64(d.d_blk_hardlimit) * 512 + quota.Inodes = uint64(d.d_ino_hardlimit) return nil } @@ -235,8 +310,8 @@ func (q *Control) fsDiskQuotaFromPath(targetPath string) (C.fs_disk_quota_t, err uintptr(unsafe.Pointer(cs)), uintptr(C.__u32(projectID)), uintptr(unsafe.Pointer(&d)), 0, 0) if errno != 0 { - return d, fmt.Errorf("Failed to get quota limit for projid %d on %s: %v", - projectID, q.backingFsBlockDev, errno.Error()) + return d, fmt.Errorf("failed to get quota limit for projid %d on %s: %w", + projectID, q.backingFsBlockDev, errno) } return d, nil @@ -254,7 +329,7 @@ func getProjectID(targetPath string) (uint32, error) { _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, uintptr(unsafe.Pointer(&fsx))) if errno != 0 { - return 0, fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error()) + return 0, fmt.Errorf("failed to get projid for %s: %w", targetPath, errno) } return uint32(fsx.fsx_projid), nil @@ -272,14 +347,14 @@ func setProjectID(targetPath string, projectID uint32) error { _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, uintptr(unsafe.Pointer(&fsx))) if errno != 0 { - return 
fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error()) + return fmt.Errorf("failed to get projid for %s: %w", targetPath, errno) } fsx.fsx_projid = C.__u32(projectID) fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT _, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR, uintptr(unsafe.Pointer(&fsx))) if errno != 0 { - return fmt.Errorf("Failed to set projid for %s: %v", targetPath, errno.Error()) + return fmt.Errorf("failed to set projid for %s: %w", targetPath, errno) } return nil @@ -287,16 +362,16 @@ func setProjectID(targetPath string, projectID uint32) error { // findNextProjectID - find the next project id to be used for containers // by scanning driver home directory to find used project ids -func (q *Control) findNextProjectID(home string) error { - files, err := ioutil.ReadDir(home) +func (q *Control) findNextProjectID() error { + files, err := os.ReadDir(q.basePath) if err != nil { - return fmt.Errorf("read directory failed : %s", home) + return fmt.Errorf("read directory failed : %s", q.basePath) } for _, file := range files { if !file.IsDir() { continue } - path := filepath.Join(home, file.Name()) + path := filepath.Join(q.basePath, file.Name()) projid, err := getProjectID(path) if err != nil { return err @@ -322,7 +397,7 @@ func openDir(path string) (*C.DIR, error) { dir := C.opendir(Cpath) if dir == nil { - return nil, fmt.Errorf("Can't open dir") + return nil, fmt.Errorf("can't open dir %v", Cpath) } return dir, nil } @@ -347,10 +422,13 @@ func makeBackingFsDev(home string) (string, error) { } backingFsBlockDev := path.Join(home, "backingFsBlockDev") + backingFsBlockDevTmp := backingFsBlockDev + ".tmp" // Re-create just in case someone copied the home directory over to a new device - unix.Unlink(backingFsBlockDev) - if err := unix.Mknod(backingFsBlockDev, unix.S_IFBLK|0600, int(stat.Dev)); err != nil { - return "", fmt.Errorf("Failed to mknod %s: %v", backingFsBlockDev, err) + if err := unix.Mknod(backingFsBlockDevTmp, unix.S_IFBLK|0600, int(stat.Dev)); err != nil { + return "", fmt.Errorf("failed to mknod %s: %w", backingFsBlockDevTmp, err) + } + if err := unix.Rename(backingFsBlockDevTmp, backingFsBlockDev); err != nil { + return "", fmt.Errorf("failed to rename %s to %s: %w", backingFsBlockDevTmp, backingFsBlockDev, err) } return backingFsBlockDev, nil diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go b/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go index be6c75a58..a15e91de2 100644 --- a/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go +++ b/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go @@ -1,14 +1,16 @@ +//go:build !linux || exclude_disk_quota || !cgo // +build !linux exclude_disk_quota !cgo package quota import ( - "github.com/pkg/errors" + "errors" ) // Quota limit params - currently we only control blocks hard limit type Quota struct { - Size uint64 + Size uint64 + Inodes uint64 } // Control - Context to be used by storage driver (e.g. 
overlay) diff --git a/vendor/github.com/containers/storage/drivers/register/register_aufs.go b/vendor/github.com/containers/storage/drivers/register/register_aufs.go index 7743dcedb..bbb9cb657 100644 --- a/vendor/github.com/containers/storage/drivers/register/register_aufs.go +++ b/vendor/github.com/containers/storage/drivers/register/register_aufs.go @@ -1,3 +1,4 @@ +//go:build !exclude_graphdriver_aufs && linux // +build !exclude_graphdriver_aufs,linux package register diff --git a/vendor/github.com/containers/storage/drivers/register/register_btrfs.go b/vendor/github.com/containers/storage/drivers/register/register_btrfs.go index 40ff1cdd0..425ebd798 100644 --- a/vendor/github.com/containers/storage/drivers/register/register_btrfs.go +++ b/vendor/github.com/containers/storage/drivers/register/register_btrfs.go @@ -1,3 +1,4 @@ +//go:build !exclude_graphdriver_btrfs && linux // +build !exclude_graphdriver_btrfs,linux package register diff --git a/vendor/github.com/containers/storage/drivers/register/register_devicemapper.go b/vendor/github.com/containers/storage/drivers/register/register_devicemapper.go index cefe2e8c7..a744eaea1 100644 --- a/vendor/github.com/containers/storage/drivers/register/register_devicemapper.go +++ b/vendor/github.com/containers/storage/drivers/register/register_devicemapper.go @@ -1,3 +1,4 @@ +//go:build !exclude_graphdriver_devicemapper && linux && cgo // +build !exclude_graphdriver_devicemapper,linux,cgo package register diff --git a/vendor/github.com/containers/storage/drivers/register/register_overlay.go b/vendor/github.com/containers/storage/drivers/register/register_overlay.go index 30e3b4d74..95b77b73e 100644 --- a/vendor/github.com/containers/storage/drivers/register/register_overlay.go +++ b/vendor/github.com/containers/storage/drivers/register/register_overlay.go @@ -1,3 +1,4 @@ +//go:build !exclude_graphdriver_overlay && linux && cgo // +build !exclude_graphdriver_overlay,linux,cgo package register diff --git a/vendor/github.com/containers/storage/drivers/register/register_zfs.go b/vendor/github.com/containers/storage/drivers/register/register_zfs.go index c748468e5..8e5788a43 100644 --- a/vendor/github.com/containers/storage/drivers/register/register_zfs.go +++ b/vendor/github.com/containers/storage/drivers/register/register_zfs.go @@ -1,4 +1,5 @@ -// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd, solaris +//go:build (!exclude_graphdriver_zfs && linux) || (!exclude_graphdriver_zfs && freebsd) || solaris +// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd solaris package register diff --git a/vendor/github.com/containers/storage/drivers/template.go b/vendor/github.com/containers/storage/drivers/template.go index 5d80b8865..7b96c082d 100644 --- a/vendor/github.com/containers/storage/drivers/template.go +++ b/vendor/github.com/containers/storage/drivers/template.go @@ -1,9 +1,8 @@ package graphdriver import ( - "github.com/sirupsen/logrus" - "github.com/containers/storage/pkg/idtools" + "github.com/sirupsen/logrus" ) // TemplateDriver is just barely enough of a driver that we can implement a @@ -31,7 +30,7 @@ func NaiveCreateFromTemplate(d TemplateDriver, id, template string, templateIDMa diff, err := d.Diff(template, templateIDMappings, parent, parentIDMappings, opts.MountLabel) if err != nil { if err2 := d.Remove(id); err2 != nil { - logrus.Errorf("error removing layer %q: %v", id, err2) + logrus.Errorf("Removing layer %q: %v", id, err2) } return err } @@ -44,7 +43,7 @@ func NaiveCreateFromTemplate(d 
TemplateDriver, id, template string, templateIDMa } if _, err = d.ApplyDiff(id, parent, applyOptions); err != nil { if err2 := d.Remove(id); err2 != nil { - logrus.Errorf("error removing layer %q: %v", id, err2) + logrus.Errorf("Removing layer %q: %v", id, err2) } return err } diff --git a/vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go b/vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go index 8ac80ee1d..d94756bdd 100644 --- a/vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go +++ b/vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package vfs // import "github.com/containers/storage/drivers/vfs" diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go index 1b58e2f63..9deaa7c3a 100644 --- a/vendor/github.com/containers/storage/drivers/vfs/driver.go +++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go @@ -5,8 +5,10 @@ import ( "io" "os" "path/filepath" + "runtime" "strconv" "strings" + "unicode" graphdriver "github.com/containers/storage/drivers" "github.com/containers/storage/pkg/archive" @@ -27,7 +29,7 @@ var ( const defaultPerms = os.FileMode(0555) func init() { - graphdriver.Register("vfs", Init) + graphdriver.MustRegister("vfs", Init) } // Init returns a new VFS driver. @@ -97,7 +99,7 @@ func (d *Driver) Status() [][2]string { // Metadata is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any meta data. func (d *Driver) Metadata(id string) (map[string]string, error) { - return nil, nil + return nil, nil //nolint: nilnil } // Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver. @@ -170,6 +172,10 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool }() rootPerms := defaultPerms + if runtime.GOOS == "darwin" { + rootPerms = os.FileMode(0700) + } + if parent != "" { st, err := system.Stat(d.dir(parent)) if err != nil { @@ -189,7 +195,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool if parent != "" { parentDir, err := d.Get(parent, graphdriver.MountOpts{}) if err != nil { - return fmt.Errorf("%s: %s", parent, err) + return fmt.Errorf("%s: %w", parent, err) } if err := dirCopy(parentDir, dir); err != nil { return err @@ -260,6 +266,40 @@ func (d *Driver) Exists(id string) bool { return err == nil } +func nameLooksLikeID(name string) bool { + if len(name) != 64 { + return false + } + for _, c := range name { + if !unicode.Is(unicode.ASCII_Hex_Digit, c) { + return false + } + } + return true +} + +// List layers (not including additional image stores) +func (d *Driver) ListLayers() ([]string, error) { + entries, err := os.ReadDir(d.homes[0]) + if err != nil { + return nil, err + } + + layers := make([]string, 0) + + for _, entry := range entries { + id := entry.Name() + // Does it look like a datadir directory? 
+ if !entry.IsDir() || !nameLooksLikeID(id) { + continue + } + + layers = append(layers, id) + } + + return layers, err +} + // AdditionalImageStores returns additional image stores supported by the driver func (d *Driver) AdditionalImageStores() []string { if len(d.homes) > 1 { diff --git a/vendor/github.com/containers/storage/drivers/windows/windows.go b/vendor/github.com/containers/storage/drivers/windows/windows.go index 149151741..66aa460cf 100644 --- a/vendor/github.com/containers/storage/drivers/windows/windows.go +++ b/vendor/github.com/containers/storage/drivers/windows/windows.go @@ -1,4 +1,5 @@ -//+build windows +//go:build windows +// +build windows package windows @@ -9,7 +10,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" "path" "path/filepath" @@ -23,7 +23,7 @@ import ( "github.com/Microsoft/go-winio" "github.com/Microsoft/go-winio/backuptar" "github.com/Microsoft/hcsshim" - "github.com/containers/storage/drivers" + graphdriver "github.com/containers/storage/drivers" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/directory" "github.com/containers/storage/pkg/idtools" @@ -53,7 +53,7 @@ var ( // init registers the windows graph drivers to the register. func init() { - graphdriver.Register("windowsfilter", InitFilter) + graphdriver.MustRegister("windowsfilter", InitFilter) // DOCKER_WINDOWSFILTER_NOREEXEC allows for inline processing which makes // debugging issues in the re-exec codepath significantly easier. if os.Getenv("DOCKER_WINDOWSFILTER_NOREEXEC") != "" { @@ -103,7 +103,7 @@ func InitFilter(home string, options graphdriver.Options) (graphdriver.Driver, e } if err := idtools.MkdirAllAs(home, 0700, 0, 0); err != nil { - return nil, fmt.Errorf("windowsfilter failed to create '%s': %v", home, err) + return nil, fmt.Errorf("windowsfilter failed to create '%s': %w", home, err) } d := &Driver{ @@ -185,6 +185,11 @@ func (d *Driver) Exists(id string) bool { return result } +// List layers (not including additional image stores) +func (d *Driver) ListLayers() ([]string, error) { + return nil, graphdriver.ErrNotSupported +} + // CreateFromTemplate creates a layer with the same contents and parent as another layer. func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error { return graphdriver.NaiveCreateFromTemplate(d, id, template, templateIDMappings, parent, parentIDMappings, opts, readWrite) @@ -252,7 +257,7 @@ func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt storageOptions, err := parseStorageOpt(storageOpt) if err != nil { - return fmt.Errorf("Failed to parse storage options - %s", err) + return fmt.Errorf("failed to parse storage options - %s", err) } if storageOptions.size != 0 { @@ -266,7 +271,7 @@ func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil { logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2) } - return fmt.Errorf("Cannot create layer with missing parent %s: %s", parent, err) + return fmt.Errorf("cannot create layer with missing parent %s: %s", parent, err) } if err := d.setLayerChain(id, layerChain); err != nil { @@ -474,7 +479,7 @@ func (d *Driver) Put(id string) error { // We use this opportunity to cleanup any -removing folders which may be // still left if the daemon was killed while it was removing a layer. 
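
// A minimal standalone sketch of the layer-ID filter used by the new
// ListLayers implementations above: a layer directory name is exactly 64
// ASCII hex digits, which screens out entries like the "l" link directory.
// The sample names are hypothetical.
package main

import (
	"fmt"
	"strings"
	"unicode"
)

func nameLooksLikeID(name string) bool {
	if len(name) != 64 {
		return false
	}
	for _, c := range name {
		if !unicode.Is(unicode.ASCII_Hex_Digit, c) {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(nameLooksLikeID(strings.Repeat("ab12", 16))) // true: 64 hex digits
	fmt.Println(nameLooksLikeID("l"))                        // false: a link dir, not a layer
}
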
func (d *Driver) Cleanup() error { - items, err := ioutil.ReadDir(d.info.HomeDir) + items, err := os.ReadDir(d.info.HomeDir) if err != nil { if os.IsNotExist(err) { return nil @@ -810,7 +815,7 @@ func (d *Driver) importLayer(id string, layerData io.Reader, parentLayerPaths [] } if err = cmd.Wait(); err != nil { - return 0, fmt.Errorf("re-exec error: %v: output: %s", err, output) + return 0, fmt.Errorf("re-exec output: %s: error: %w", output, err) } return strconv.ParseInt(output.String(), 10, 64) @@ -869,7 +874,7 @@ func writeLayer(layerData io.Reader, home string, id string, parentLayerPaths .. // resolveID computes the layerID information based on the given id. func (d *Driver) resolveID(id string) (string, error) { - content, err := ioutil.ReadFile(filepath.Join(d.dir(id), "layerID")) + content, err := os.ReadFile(filepath.Join(d.dir(id), "layerID")) if os.IsNotExist(err) { return id, nil } else if err != nil { @@ -880,23 +885,23 @@ func (d *Driver) resolveID(id string) (string, error) { // setID stores the layerId in disk. func (d *Driver) setID(id, altID string) error { - return ioutil.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600) + return os.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600) } // getLayerChain returns the layer chain information. func (d *Driver) getLayerChain(id string) ([]string, error) { jPath := filepath.Join(d.dir(id), "layerchain.json") - content, err := ioutil.ReadFile(jPath) + content, err := os.ReadFile(jPath) if os.IsNotExist(err) { return nil, nil } else if err != nil { - return nil, fmt.Errorf("Unable to read layerchain file - %s", err) + return nil, fmt.Errorf("unable to read layerchain file - %s", err) } var layerChain []string err = json.Unmarshal(content, &layerChain) if err != nil { - return nil, fmt.Errorf("Failed to unmarshall layerchain json - %s", err) + return nil, fmt.Errorf("failed to unmarshall layerchain json - %s", err) } return layerChain, nil @@ -906,13 +911,13 @@ func (d *Driver) getLayerChain(id string) ([]string, error) { func (d *Driver) setLayerChain(id string, chain []string) error { content, err := json.Marshal(&chain) if err != nil { - return fmt.Errorf("Failed to marshall layerchain json - %s", err) + return fmt.Errorf("failed to marshall layerchain json - %s", err) } jPath := filepath.Join(d.dir(id), "layerchain.json") - err = ioutil.WriteFile(jPath, content, 0600) + err = os.WriteFile(jPath, content, 0600) if err != nil { - return fmt.Errorf("Unable to write layerchain file - %s", err) + return fmt.Errorf("unable to write layerchain file - %s", err) } return nil @@ -999,7 +1004,7 @@ func parseStorageOpt(storageOpt map[string]string) (*storageOptions, error) { } options.size = uint64(size) default: - return nil, fmt.Errorf("Unknown storage option: %s", key) + return nil, fmt.Errorf("unknown storage option: %s", key) } } return &options, nil diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs.go b/vendor/github.com/containers/storage/drivers/zfs/zfs.go index e034bf152..aeef64103 100644 --- a/vendor/github.com/containers/storage/drivers/zfs/zfs.go +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs.go @@ -1,3 +1,4 @@ +//go:build linux || freebsd // +build linux freebsd package zfs @@ -17,9 +18,8 @@ import ( "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/parsers" - "github.com/mistifyio/go-zfs" + zfs "github.com/mistifyio/go-zfs/v3" "github.com/opencontainers/selinux/go-selinux/label" - 
"github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -33,7 +33,7 @@ type zfsOptions struct { const defaultPerms = os.FileMode(0555) func init() { - graphdriver.Register("zfs", Init) + graphdriver.MustRegister("zfs", Init) } // Logger returns a zfs logger implementation. @@ -54,15 +54,15 @@ func Init(base string, opt graphdriver.Options) (graphdriver.Driver, error) { if _, err := exec.LookPath("zfs"); err != nil { logger.Debugf("zfs command is not available: %v", err) - return nil, errors.Wrap(graphdriver.ErrPrerequisites, "the 'zfs' command is not available") + return nil, fmt.Errorf("the 'zfs' command is not available: %w", graphdriver.ErrPrerequisites) } - file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 0600) + file, err := unix.Open("/dev/zfs", unix.O_RDWR, 0600) if err != nil { logger.Debugf("cannot open /dev/zfs: %v", err) - return nil, errors.Wrapf(graphdriver.ErrPrerequisites, "could not open /dev/zfs: %v", err) + return nil, fmt.Errorf("could not open /dev/zfs: %v: %w", err, graphdriver.ErrPrerequisites) } - defer file.Close() + defer unix.Close(file) options, err := parseOptions(opt.DriverOptions) if err != nil { @@ -90,7 +90,7 @@ func Init(base string, opt graphdriver.Options) (graphdriver.Driver, error) { filesystems, err := zfs.Filesystems(options.fsName) if err != nil { - return nil, fmt.Errorf("Cannot find root filesystem %s: %v", options.fsName, err) + return nil, fmt.Errorf("cannot find root filesystem %s: %w", options.fsName, err) } filesystemsCache := make(map[string]bool, len(filesystems)) @@ -103,15 +103,15 @@ func Init(base string, opt graphdriver.Options) (graphdriver.Driver, error) { } if rootDataset == nil { - return nil, fmt.Errorf("BUG: zfs get all -t filesystem -rHp '%s' should contain '%s'", options.fsName, options.fsName) + return nil, fmt.Errorf("zfs get all -t filesystem -rHp '%s' should contain '%s'", options.fsName, options.fsName) } rootUID, rootGID, err := idtools.GetRootUIDGID(opt.UIDMaps, opt.GIDMaps) if err != nil { - return nil, fmt.Errorf("Failed to get root uid/gid: %v", err) + return nil, fmt.Errorf("failed to get root uid/gid: %w", err) } if err := idtools.MkdirAllAs(base, 0700, rootUID, rootGID); err != nil { - return nil, fmt.Errorf("Failed to create '%s': %v", base, err) + return nil, fmt.Errorf("failed to create '%s': %w", base, err) } d := &Driver{ @@ -140,7 +140,7 @@ func parseOptions(opt []string) (zfsOptions, error) { case "zfs.mountopt": options.mountOptions = val default: - return options, fmt.Errorf("Unknown option %s", key) + return options, fmt.Errorf("unknown option %s", key) } } return options, nil @@ -149,7 +149,7 @@ func parseOptions(opt []string) (zfsOptions, error) { func lookupZfsDataset(rootdir string) (string, error) { var stat unix.Stat_t if err := unix.Stat(rootdir, &stat); err != nil { - return "", fmt.Errorf("Failed to access '%s': %s", rootdir, err) + return "", fmt.Errorf("failed to access '%s': %w", rootdir, err) } wantedDev := stat.Dev @@ -168,7 +168,7 @@ func lookupZfsDataset(rootdir string) (string, error) { } } - return "", fmt.Errorf("Failed to find zfs dataset mounted on '%s' in /proc/mounts", rootdir) + return "", fmt.Errorf("failed to find zfs dataset mounted on '%s' in /proc/mounts", rootdir) } // Driver holds information about the driver, such as zfs dataset, options and cache. 
@@ -315,7 +315,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) error { if opts != nil { rootUID, rootGID, err = idtools.GetRootUIDGID(opts.UIDs(), opts.GIDs()) if err != nil { - return fmt.Errorf("Failed to get root uid/gid: %v", err) + return fmt.Errorf("failed to get root uid/gid: %w", err) } mountLabel = opts.MountLabel } @@ -341,22 +341,22 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) error { mountOpts := label.FormatMountLabel(d.options.mountOptions, mountLabel) if err := mount.Mount(name, mountpoint, "zfs", mountOpts); err != nil { - return errors.Wrap(err, "error creating zfs mount") + return fmt.Errorf("creating zfs mount: %w", err) } defer func() { - if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil { - logrus.Warnf("Failed to unmount %s mount %s: %v", id, mountpoint, err) + if err := detachUnmount(mountpoint); err != nil { + logrus.Warnf("failed to unmount %s mount %s: %v", id, mountpoint, err) } }() if err := os.Chmod(mountpoint, defaultPerms); err != nil { - return errors.Wrap(err, "error setting permissions on zfs mount") + return fmt.Errorf("setting permissions on zfs mount: %w", err) } // this is our first mount after creation of the filesystem, and the root dir may still have root // permissions instead of the remapped root uid:gid (if user namespaces are enabled): if err := os.Chown(mountpoint, rootUID, rootGID); err != nil { - return errors.Wrapf(err, "modifying zfs mountpoint (%s) ownership", mountpoint) + return fmt.Errorf("modifying zfs mountpoint (%s) ownership: %w", mountpoint, err) } } @@ -377,7 +377,7 @@ func parseStorageOpt(storageOpt map[string]string) (string, error) { case "size": return v, nil default: - return "0", fmt.Errorf("Unknown option %s", key) + return "0", fmt.Errorf("unknown option %s", key) } } return "0", nil @@ -459,13 +459,13 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr } if err := mount.Mount(filesystem, mountpoint, "zfs", opts); err != nil { - return "", errors.Wrap(err, "error creating zfs mount") + return "", fmt.Errorf("creating zfs mount: %w", err) } if remountReadOnly { opts = label.FormatMountLabel("remount,ro", options.MountLabel) if err := mount.Mount(filesystem, mountpoint, "zfs", opts); err != nil { - return "", errors.Wrap(err, "error remounting zfs mount read-only") + return "", fmt.Errorf("remounting zfs mount read-only: %w", err) } } @@ -483,7 +483,7 @@ func (d *Driver) Put(id string) error { logger.Debugf(`unmount("%s")`, mountpoint) - if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil { + if err := detachUnmount(mountpoint); err != nil { logger.Warnf("Failed to unmount %s mount %s: %v", id, mountpoint, err) } if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) { @@ -506,6 +506,11 @@ func (d *Driver) Exists(id string) bool { return d.filesystemsCache[d.zfsPath(id)] } +// List layers (not including additional image stores) +func (d *Driver) ListLayers() ([]string, error) { + return nil, graphdriver.ErrNotSupported +} + // AdditionalImageStores returns additional image stores supported by the driver func (d *Driver) AdditionalImageStores() []string { return nil diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go index bf6905159..c3c73c61e 100644 --- a/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go @@ -2,10 +2,8 @@ 
package zfs import ( "fmt" - "strings" - "github.com/containers/storage/drivers" - "github.com/pkg/errors" + graphdriver "github.com/containers/storage/drivers" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -13,27 +11,23 @@ import ( func checkRootdirFs(rootdir string) error { var buf unix.Statfs_t if err := unix.Statfs(rootdir, &buf); err != nil { - return fmt.Errorf("Failed to access '%s': %s", rootdir, err) + return fmt.Errorf("failed to access '%s': %s", rootdir, err) } // on FreeBSD buf.Fstypename contains ['z', 'f', 's', 0 ... ] if (buf.Fstypename[0] != 122) || (buf.Fstypename[1] != 102) || (buf.Fstypename[2] != 115) || (buf.Fstypename[3] != 0) { logrus.WithField("storage-driver", "zfs").Debugf("no zfs dataset found for rootdir '%s'", rootdir) - return errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", rootdir) + return fmt.Errorf("no zfs dataset found for rootdir '%s': %w", rootdir, graphdriver.ErrPrerequisites) } return nil } func getMountpoint(id string) string { - maxlen := 12 - - // we need to preserve filesystem suffix - suffix := strings.SplitN(id, "-", 2) - - if len(suffix) > 1 { - return id[:maxlen] + "-" + suffix[1] - } + return id +} - return id[:maxlen] +func detachUnmount(mountpoint string) error { + // FreeBSD's MNT_FORCE is roughly equivalent to MNT_DETACH + return unix.Unmount(mountpoint, unix.MNT_FORCE) } diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go index edcb1da36..d43ba5c2b 100644 --- a/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go @@ -1,9 +1,11 @@ package zfs import ( + "fmt" + graphdriver "github.com/containers/storage/drivers" - "github.com/pkg/errors" "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) func checkRootdirFs(rootDir string) error { @@ -18,7 +20,7 @@ func checkRootdirFs(rootDir string) error { if fsMagic != graphdriver.FsMagicZfs { logrus.WithField("root", rootDir).WithField("backingFS", backingFS).WithField("storage-driver", "zfs").Error("No zfs dataset found for root") - return errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", rootDir) + return fmt.Errorf("no zfs dataset found for rootdir '%s': %w", rootDir, graphdriver.ErrPrerequisites) } return nil @@ -27,3 +29,7 @@ func checkRootdirFs(rootDir string) error { func getMountpoint(id string) string { return id } + +func detachUnmount(mountpoint string) error { + return unix.Unmount(mountpoint, unix.MNT_DETACH) +} diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go index 643b169bc..738b0ae1b 100644 --- a/vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go @@ -1,11 +1,4 @@ +//go:build !linux && !freebsd // +build !linux,!freebsd package zfs - -func checkRootdirFs(rootdir string) error { - return nil -} - -func getMountpoint(id string) string { - return id -} diff --git a/vendor/github.com/containers/storage/errors.go b/vendor/github.com/containers/storage/errors.go index 5fc810b89..de6e37754 100644 --- a/vendor/github.com/containers/storage/errors.go +++ b/vendor/github.com/containers/storage/errors.go @@ -1,6 +1,8 @@ package storage import ( + "errors" + "github.com/containers/storage/types" ) @@ -55,4 +57,9 @@ var ( ErrStoreIsReadOnly = 
types.ErrStoreIsReadOnly // ErrNotSupported is returned when the requested functionality is not supported. ErrNotSupported = types.ErrNotSupported + // ErrInvalidMappings is returned when the specified mappings are invalid. + ErrInvalidMappings = types.ErrInvalidMappings + // ErrInvalidNameOperation is returned when updateName is called with invalid operation. + // Internal error + errInvalidUpdateNameOperation = errors.New("invalid update name operation") ) diff --git a/vendor/github.com/containers/storage/go.mod b/vendor/github.com/containers/storage/go.mod index e7ca56e64..daab3a595 100644 --- a/vendor/github.com/containers/storage/go.mod +++ b/vendor/github.com/containers/storage/go.mod @@ -1,32 +1,50 @@ -go 1.14 +go 1.17 module github.com/containers/storage require ( - github.com/BurntSushi/toml v0.3.1 - github.com/Microsoft/go-winio v0.5.0 - github.com/Microsoft/hcsshim v0.8.17 - github.com/docker/go-units v0.4.0 + github.com/BurntSushi/toml v1.2.1 + github.com/Microsoft/go-winio v0.5.2 + github.com/Microsoft/hcsshim v0.9.6 + github.com/containerd/stargz-snapshotter/estargz v0.13.0 + github.com/cyphar/filepath-securejoin v0.2.3 + github.com/docker/go-units v0.5.0 github.com/google/go-intervals v0.0.2 github.com/hashicorp/go-multierror v1.1.1 - github.com/json-iterator/go v1.1.11 - github.com/klauspost/compress v1.12.3 + github.com/json-iterator/go v1.1.12 + github.com/klauspost/compress v1.15.14 github.com/klauspost/pgzip v1.2.5 - github.com/mattn/go-shellwords v1.0.11 - github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible - github.com/moby/sys/mountinfo v0.4.1 + github.com/mattn/go-shellwords v1.0.12 + github.com/mistifyio/go-zfs/v3 v3.0.0 + github.com/moby/sys/mountinfo v0.6.2 github.com/opencontainers/go-digest v1.0.0 - github.com/opencontainers/runc v1.0.0-rc95 + github.com/opencontainers/runc v1.1.4 github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 - github.com/opencontainers/selinux v1.8.1 - github.com/pkg/errors v0.9.1 - github.com/sirupsen/logrus v1.8.1 - github.com/stretchr/testify v1.7.0 + github.com/opencontainers/selinux v1.10.2 + github.com/sirupsen/logrus v1.9.0 + github.com/stretchr/testify v1.8.1 github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 github.com/tchap/go-patricia v2.3.0+incompatible - github.com/ulikunitz/xz v0.5.10 - github.com/vbatts/tar-split v0.11.1 - golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 - golang.org/x/sys v0.0.0-20210426230700-d19ff857e887 + github.com/ulikunitz/xz v0.5.11 + github.com/vbatts/tar-split v0.11.2 + golang.org/x/net v0.0.0-20210825183410-e898025ed96a + golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 gotest.tools v2.2.0+incompatible ) + +require ( + github.com/containerd/cgroups v1.0.1 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect + github.com/google/go-cmp v0.5.6 // indirect + github.com/google/uuid v1.2.0 // indirect + github.com/hashicorp/errwrap v1.0.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + go.opencensus.io v0.22.3 // indirect + golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/vendor/github.com/containers/storage/go.sum 
b/vendor/github.com/containers/storage/go.sum index 5373d0597..7ff0c337c 100644 --- a/vendor/github.com/containers/storage/go.sum +++ b/vendor/github.com/containers/storage/go.sum @@ -10,6 +10,7 @@ cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6T cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -34,8 +35,9 @@ github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935 github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= +github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= @@ -45,8 +47,8 @@ github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugX github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.5.0 h1:Elr9Wn+sGKPlkaBvwu4mTrxtmOp3F3yV9qhaHbXGjwU= -github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= +github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= @@ -54,13 +56,16 @@ github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg3 github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= -github.com/Microsoft/hcsshim v0.8.17 h1:yFHH5bghP9ij5Y34PPaMOE8g//oXZ0uJQeMENVo2zcI= -github.com/Microsoft/hcsshim v0.8.17/go.mod 
h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= +github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= +github.com/Microsoft/hcsshim v0.9.6 h1:VwnDOgLeoi2du6dAznfmspNqTiwczvjv4K7NxuY9jsY= +github.com/Microsoft/hcsshim v0.9.6/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -68,6 +73,7 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= @@ -77,6 +83,7 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= @@ -85,11 +92,13 @@ github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7 github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod 
h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= +github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= +github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= @@ -97,9 +106,12 @@ github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmE github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= -github.com/cilium/ebpf v0.5.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= @@ -121,6 +133,7 @@ github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= +github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= @@ -134,6 +147,7 @@ github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= github.com/containerd/containerd 
v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= +github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= @@ -161,11 +175,15 @@ github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJ github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM= +github.com/containerd/stargz-snapshotter/estargz v0.13.0 h1:fD7AwuVV+B40p0d9qVkH/Au1qhp8hn/HWJHIYjpEcfw= +github.com/containerd/stargz-snapshotter/estargz v0.13.0/go.mod h1:m+9VaGJGlhCnrcEUod8mYumTmRgblwd3rC5UCEh2Yp0= github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= @@ -195,13 +213,16 @@ github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= -github.com/coreos/go-systemd/v22 v22.3.1/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod 
h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI= +github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= @@ -214,15 +235,20 @@ github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= -github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= @@ -234,6 +260,8 @@ github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod 
h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -255,11 +283,15 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= @@ -268,6 +300,7 @@ github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblf github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -289,6 +322,8 @@ github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf 
v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -303,8 +338,7 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -314,8 +348,11 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM= github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -326,6 +363,7 @@ github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OI github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -338,6 +376,7 @@ github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3i github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod 
h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -347,6 +386,7 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -363,16 +403,18 @@ github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= @@ -383,8 +425,9 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.12.3 
h1:G5AfA94pHPysR56qqrkO2pxEexdDzrpFJ6yt/VqWxVU= -github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= +github.com/klauspost/compress v1.15.14 h1:i7WCKDToww0wA+9qrUZ1xOjp218vfFo3nTU6UHp+gOc= +github.com/klauspost/compress v1.15.14/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -393,13 +436,15 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= @@ -408,48 +453,61 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= -github.com/mattn/go-shellwords v1.0.11 h1:vCoR9VPpsk/TZFW2JwK5I9S0xdrtUq2bph6/YjEPnaw= -github.com/mattn/go-shellwords v1.0.11/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= +github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= +github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod 
h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible h1:aKW/4cBs+yK6gpqU3K/oIwk9Q/XICqd3zOX/UFuvqmk= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= +github.com/mistifyio/go-zfs/v3 v3.0.0 h1:J5QK618xRcXnQYZ2GE5FdmpS1ufIrWue+lR/mpe6/14= +github.com/mistifyio/go-zfs/v3 v3.0.0/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= -github.com/moby/sys/mountinfo v0.4.1 h1:1O+1cHA1aujwEwwVMa2Xm2l+gIpUHyd3+D+d7LZh1kM= github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= +github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= +github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod 
h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -464,8 +522,9 @@ github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59P github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= -github.com/opencontainers/runc v1.0.0-rc95 h1:RMuWVfY3E1ILlVsC3RhIq38n4sJtlOFwU9gfFZSqrd0= -github.com/opencontainers/runc v1.0.0-rc95/go.mod h1:z+bZxa/+Tz/FmYVWkhUajJdzFeOqjc5vrqskhVyHGUM= +github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= +github.com/opencontainers/runc v1.1.4 h1:nRCz/8sKg6K6jgYAFLDlXzPeITBZJyX28DBVhWD+5dg= +github.com/opencontainers/runc v1.1.4/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= @@ -476,8 +535,10 @@ github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.m github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= github.com/opencontainers/selinux v1.6.0/go.mod 
h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= -github.com/opencontainers/selinux v1.8.1 h1:yvEZh7CsfnJNwKzG9ZeXwbvR05RAZsu5RS/3vA6qFTA= -github.com/opencontainers/selinux v1.8.1/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= +github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= +github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= +github.com/opencontainers/selinux v1.10.2 h1:NFy2xCsjn7+WspbfZkUd5zyVeisV7VFbPSP96+8/ha4= +github.com/opencontainers/selinux v1.10.2/go.mod h1:cARutUbaUrlRClyvxOICCgKixCs6L05aUsohzA3EkHQ= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= @@ -519,11 +580,14 @@ github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= @@ -532,8 +596,9 @@ github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMB github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod 
h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= @@ -556,13 +621,19 @@ github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= @@ -573,14 +644,15 @@ github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8= -github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= +github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE= -github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g= +github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME= 
+github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI= github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= @@ -588,7 +660,6 @@ github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmF github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= -github.com/willf/bitset v1.1.11 h1:N7Z7E9UvjW+sGsEl7k/SJrvY2reP1A07MrGuCjIOjRE= github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= @@ -597,12 +668,14 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -610,6 +683,7 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= @@ -657,6 +731,7 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod 
h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -674,6 +749,7 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -685,13 +761,17 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210825183410-e898025ed96a h1:bRuuGXV8wwSdGTB+CtJf+FjgO1APK1CoO39T4BN/XBw= +golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -703,9 +783,12 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -724,6 +807,7 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -750,6 +834,7 @@ golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -757,6 +842,7 @@ golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -765,8 +851,17 @@ golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210426230700-d19ff857e887 h1:dXfMednGJh/SUUFjTLsWJz3P+TQt9qnR11GgeI3vWKs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 h1:0A+M6Uqn+Eje4kHMK80dtF3JCXC4ykBgQG4Fe06QRhQ= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -774,13 +869,16 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -796,6 +894,7 @@ golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -814,8 +913,13 @@ golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -832,11 +936,13 @@ google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/appengine v1.1.0/go.mod 
h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -858,7 +964,10 @@ google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4 google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -872,8 +981,12 @@ google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQ google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -884,13 +997,17 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= @@ -905,13 +1022,15 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= @@ -934,6 +1053,7 @@ k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= +k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= k8s.io/component-base v0.20.1/go.mod 
h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= @@ -942,8 +1062,12 @@ k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= @@ -952,6 +1076,7 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/vendor/github.com/containers/storage/idset.go b/vendor/github.com/containers/storage/idset.go index f870b9cee..be9e45cfd 100644 --- a/vendor/github.com/containers/storage/idset.go +++ b/vendor/github.com/containers/storage/idset.go @@ -1,9 +1,12 @@ package storage import ( + "fmt" + "strings" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/types" "github.com/google/go-intervals/intervalset" - "github.com/pkg/errors" ) // idSet represents a set of integer IDs. It is stored as an ordered set of intervals. 
@@ -113,7 +116,7 @@ func (s *idSet) findAvailable(n int) (*idSet, error) { n -= i.length() } if n > 0 { - return nil, errors.New("could not find enough available IDs") + return nil, types.ErrNoAvailableIDs } return &idSet{set: intervalset.NewImmutableSet(intervals)}, nil } @@ -218,3 +221,45 @@ func maxInt(a, b int) int { } return a } + +func hasOverlappingRanges(mappings []idtools.IDMap) error { + hostIntervals := intervalset.Empty() + containerIntervals := intervalset.Empty() + + var conflicts []string + + for _, m := range mappings { + c := interval{start: m.ContainerID, end: m.ContainerID + m.Size} + h := interval{start: m.HostID, end: m.HostID + m.Size} + + added := false + overlaps := false + + containerIntervals.IntervalsBetween(c, func(x intervalset.Interval) bool { + overlaps = true + return false + }) + if overlaps { + conflicts = append(conflicts, fmt.Sprintf("%v:%v:%v", m.ContainerID, m.HostID, m.Size)) + added = true + } + containerIntervals.Add(intervalset.NewSet([]intervalset.Interval{c})) + + hostIntervals.IntervalsBetween(h, func(x intervalset.Interval) bool { + overlaps = true + return false + }) + if overlaps && !added { + conflicts = append(conflicts, fmt.Sprintf("%v:%v:%v", m.ContainerID, m.HostID, m.Size)) + } + hostIntervals.Add(intervalset.NewSet([]intervalset.Interval{h})) + } + + if conflicts != nil { + if len(conflicts) == 1 { + return fmt.Errorf("the specified UID and/or GID mapping %s conflicts with other mappings: %w", conflicts[0], ErrInvalidMappings) + } + return fmt.Errorf("the specified UID and/or GID mappings %s conflict with other mappings: %w", strings.Join(conflicts, ", "), ErrInvalidMappings) + } + return nil +} diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go index bca25a65b..577b6f8ed 100644 --- a/vendor/github.com/containers/storage/images.go +++ b/vendor/github.com/containers/storage/images.go @@ -1,7 +1,7 @@ package storage import ( - "io/ioutil" + "fmt" "os" "path/filepath" "strings" @@ -9,11 +9,11 @@ import ( "time" "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/lockfile" "github.com/containers/storage/pkg/stringid" "github.com/containers/storage/pkg/stringutils" "github.com/containers/storage/pkg/truncindex" digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" ) const ( @@ -94,11 +94,17 @@ type Image struct { Flags map[string]interface{} `json:"flags,omitempty"` } -// ROImageStore provides bookkeeping for information about Images. -type ROImageStore interface { - ROFileBasedStore - ROMetadataStore - ROBigDataStore +// roImageStore provides bookkeeping for information about Images. +type roImageStore interface { + roMetadataStore + roBigDataStore + + // startReading makes sure the store is fresh, and locks it for reading. + // If this succeeds, the caller MUST call stopReading(). + startReading() error + + // stopReading releases locks obtained by startReading. + stopReading() // Exists checks if there is an image with the given ID or name. Exists(id string) bool @@ -106,10 +112,6 @@ type ROImageStore interface { // Get retrieves information about an image given an ID or name. Get(id string) (*Image, error) - // Lookup attempts to translate a name to an ID. Most methods do this - // implicitly. - Lookup(name string) (string, error) - // Images returns a slice enumerating the known images. 
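A quick illustration of the new hasOverlappingRanges helper in the idset.go hunk above: the intervals are half-open ([start, end)), so contiguous ranges may touch without conflicting, while any true intersection of container or host ranges is reported against ErrInvalidMappings. The following in-package test is an illustrative sketch only; the test name and cases are not part of this change:

package storage

import (
	"testing"

	"github.com/containers/storage/pkg/idtools"
)

// Illustrative sketch, not part of this change: contiguous host ranges such as
// [1000,2000) followed by [2000,3000) should pass, while intersecting host
// ranges [1000,2000) and [1500,2500) must be reported as a conflict.
func TestHasOverlappingRanges(t *testing.T) {
	overlapping := []idtools.IDMap{
		{ContainerID: 0, HostID: 1000, Size: 1000},
		{ContainerID: 1000, HostID: 1500, Size: 1000}, // host range intersects the first mapping
	}
	if err := hasOverlappingRanges(overlapping); err == nil {
		t.Error("expected a conflict error wrapping ErrInvalidMappings")
	}

	contiguous := []idtools.IDMap{
		{ContainerID: 0, HostID: 1000, Size: 1000},
		{ContainerID: 1000, HostID: 2000, Size: 1000},
	}
	if err := hasOverlappingRanges(contiguous); err != nil {
		t.Errorf("expected contiguous mappings to pass, got: %v", err)
	}
}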
Images() ([]Image, error) @@ -120,40 +122,55 @@ type ROImageStore interface { ByDigest(d digest.Digest) ([]*Image, error) } -// ImageStore provides bookkeeping for information about Images. -type ImageStore interface { - ROImageStore - RWFileBasedStore - RWMetadataStore - RWImageBigDataStore - FlaggableStore +// rwImageStore provides bookkeeping for information about Images. +type rwImageStore interface { + roImageStore + rwMetadataStore + rwImageBigDataStore + flaggableStore + + // startWriting makes sure the store is fresh, and locks it for writing. + // If this succeeds, the caller MUST call stopWriting(). + startWriting() error + + // stopWriting releases locks obtained by startWriting. + stopWriting() // Create creates an image that has a specified ID (or a random one) and // optional names, using the specified layer as its topmost (hopefully // read-only) layer. That layer can be referenced by multiple images. Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (*Image, error) - // SetNames replaces the list of names associated with an image with the - // supplied values. The values are expected to be valid normalized + // updateNames modifies names associated with an image based on (op, names). + // The values are expected to be valid normalized // named image references. - SetNames(id string, names []string) error + updateNames(id string, names []string, op updateNameOperation) error // Delete removes the record of the image. Delete(id string) error + addMappedTopLayer(id, layer string) error + removeMappedTopLayer(id, layer string) error + // Wipe removes records of all images. Wipe() error } type imageStore struct { - lockfile Locker + // The following fields are only set when constructing imageStore, and must never be modified afterwards. + // They are safe to access without any other locking. + lockfile *lockfile.LockFile // lockfile.IsReadWrite can be used to distinguish between read-write and read-only image stores. dir string - images []*Image - idindex *truncindex.TruncIndex - byid map[string]*Image - byname map[string]*Image - bydigest map[digest.Digest][]*Image - loadMut sync.Mutex + + inProcessLock sync.RWMutex // Can _only_ be obtained with lockfile held. + // The following fields can only be read/written with read/write ownership of inProcessLock, respectively. + // Almost all users should use startReading() or startWriting(). + lastWrite lockfile.LastWrite + images []*Image + idindex *truncindex.TruncIndex + byid map[string]*Image + byname map[string]*Image + bydigest map[digest.Digest][]*Image } func copyImage(i *Image) *Image { @@ -186,6 +203,191 @@ func copyImageSlice(slice []*Image) []*Image { return nil } +// startWritingWithReload makes sure the store is fresh if canReload, and locks it for writing. +// If this succeeds, the caller MUST call stopWriting(). +// +// This is an internal implementation detail of imageStore construction, every other caller +// should use startWriting() instead. +func (r *imageStore) startWritingWithReload(canReload bool) error { + r.lockfile.Lock() + r.inProcessLock.Lock() + succeeded := false + defer func() { + if !succeeded { + r.inProcessLock.Unlock() + r.lockfile.Unlock() + } + }() + + if canReload { + if _, err := r.reloadIfChanged(true); err != nil { + return err + } + } + + succeeded = true + return nil +} + +// startWriting makes sure the store is fresh, and locks it for writing. +// If this succeeds, the caller MUST call stopWriting().
+func (r *imageStore) startWriting() error { + return r.startWritingWithReload(true) +} + +// stopWriting releases locks obtained by startWriting. +func (r *imageStore) stopWriting() { + r.inProcessLock.Unlock() + r.lockfile.Unlock() +} + +// startReadingWithReload makes sure the store is fresh if canReload, and locks it for reading. +// If this succeeds, the caller MUST call stopReading(). +// +// This is an internal implementation detail of imageStore construction, every other caller +// should use startReading() instead. +func (r *imageStore) startReadingWithReload(canReload bool) error { + // inProcessLocked calls the nested function with r.inProcessLock held for writing. + inProcessLocked := func(fn func() error) error { + r.inProcessLock.Lock() + defer r.inProcessLock.Unlock() + return fn() + } + + r.lockfile.RLock() + unlockFn := r.lockfile.Unlock // A function to call to clean up, or nil + defer func() { + if unlockFn != nil { + unlockFn() + } + }() + r.inProcessLock.RLock() + unlockFn = r.stopReading + + if canReload { + // If we are lucky, we can just hold the read locks, check that we are fresh, and continue. + _, modified, err := r.modified() + if err != nil { + return err + } + if modified { + // We are unlucky, and need to reload. + // NOTE: Multiple goroutines can get to this place approximately simultaneously. + r.inProcessLock.RUnlock() + unlockFn = r.lockfile.Unlock + + // r.lastWrite can change at this point if another goroutine reloads the store before us. That’s why we don’t unconditionally + // trigger a load below; we (lock and) reloadIfChanged() again. + + // First try reloading with r.lockfile held for reading. + // r.inProcessLock will serialize all goroutines that got here; + // each will re-check on-disk state vs. r.lastWrite, and the first one will actually reload the data. + var tryLockedForWriting bool + if err := inProcessLocked(func() error { + // We could optimize this further: The r.lockfile.GetLastWrite() value shouldn’t change as long as we hold r.lockfile, + // so if r.lastWrite was already updated, we don’t need to actually read the on-filesystem lock. + var err error + tryLockedForWriting, err = r.reloadIfChanged(false) + return err + }); err != nil { + if !tryLockedForWriting { + return err + } + // Not good enough, we need r.lockfile held for writing. So, let’s do that. + unlockFn() + unlockFn = nil + + r.lockfile.Lock() + unlockFn = r.lockfile.Unlock + if err := inProcessLocked(func() error { + _, err := r.reloadIfChanged(true) + return err + }); err != nil { + return err + } + unlockFn() + unlockFn = nil + + r.lockfile.RLock() + unlockFn = r.lockfile.Unlock + // We need to check for a reload once more because the on-disk state could have been modified + // after we released the lock. + // If that, _again_, finds inconsistent state, just give up. + // We could, plausibly, retry a few times, but that inconsistent state (duplicate image names) + // shouldn’t be saved (by correct implementations) in the first place. + if err := inProcessLocked(func() error { + _, err := r.reloadIfChanged(false) + return err + }); err != nil { + return fmt.Errorf("(even after successfully cleaning up once:) %w", err) + } + } + + // NOTE that we hold neither a read nor write inProcessLock at this point. That’s fine in ordinary operation, because + // the on-filesystem r.lockfile should protect us against (cooperating) writers, and any use of r.inProcessLock + // protects us against in-process writers modifying data. 
+ // In presence of non-cooperating writers, we just ensure that 1) the in-memory data is not clearly out-of-date + // and 2) access to the in-memory data is not racy; + // but we can’t protect against those out-of-process writers modifying _files_ while we are assuming they are in a consistent state. + + r.inProcessLock.RLock() + } + } + + unlockFn = nil + return nil +} + +// startReading makes sure the store is fresh, and locks it for reading. +// If this succeeds, the caller MUST call stopReading(). +func (r *imageStore) startReading() error { + return r.startReadingWithReload(true) +} + +// stopReading releases locks obtained by startReading. +func (r *imageStore) stopReading() { + r.inProcessLock.RUnlock() + r.lockfile.Unlock() +} + +// modified returns true if the on-disk state has changed (i.e. if reloadIfChanged may need to modify the store), +// and a lockfile.LastWrite value for that update. +// +// The caller must hold r.lockfile for reading _or_ writing. +// The caller must hold r.inProcessLock for reading or writing. +func (r *imageStore) modified() (lockfile.LastWrite, bool, error) { + return r.lockfile.ModifiedSince(r.lastWrite) +} + +// reloadIfChanged reloads the contents of the store from disk if it is changed. +// +// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true +// if it is held for writing. +// +// The caller must hold r.inProcessLock for WRITING. +// +// If !lockedForWriting and this function fails, the return value indicates whether +// reloadIfChanged() with lockedForWriting could succeed. +func (r *imageStore) reloadIfChanged(lockedForWriting bool) (bool, error) { + lastWrite, modified, err := r.modified() + if err != nil { + return false, err + } + // We require callers to always hold r.inProcessLock for WRITING, even if they might not end up calling r.load() + // and modify no fields, to ensure they see fresh data: + // r.lockfile.Modified() only returns true once per change. Without an exclusive lock, + // one goroutine might see r.lockfile.Modified() == true and decide to load, and in the meanwhile another one could + // see r.lockfile.Modified() == false and proceed to use in-memory data without noticing it is stale. + if modified { + if tryLockedForWriting, err := r.load(lockedForWriting); err != nil { + return tryLockedForWriting, err // r.lastWrite is unchanged, so we will load the next time again. + } + r.lastWrite = lastWrite + } + return false, nil +} + +// Requires startReading or startWriting. func (r *imageStore) Images() ([]Image, error) { images := make([]Image, len(r.images)) for i := range r.images { @@ -216,12 +418,13 @@ func bigDataNameIsManifest(name string) bool { // recomputeDigests takes a fixed digest and a name-to-digest map and builds a // list of the unique values that would identify the image. +// The caller must hold r.inProcessLock for writing. 
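With the full read and write paths in place, the calling convention for the new unexported API is simple to state: every startReading()/startWriting() must be paired with the matching stop call, and the reloadIfChanged dance above happens transparently inside the start call. The helpers below are in-package illustrative sketches, not part of this change:

// Read side: startReading() refreshes stale in-memory state for us; the
// caller only has to guarantee the paired stopReading().
func listImages(r *imageStore) ([]Image, error) {
	if err := r.startReading(); err != nil {
		return nil, err
	}
	defer r.stopReading()
	return r.Images()
}

// Write side: the same pairing with startWriting()/stopWriting().
func renameImage(r *imageStore, id string, names []string, op updateNameOperation) error {
	if err := r.startWriting(); err != nil {
		return err
	}
	defer r.stopWriting()
	return r.updateNames(id, names, op)
}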
func (i *Image) recomputeDigests() error { validDigests := make([]digest.Digest, 0, len(i.BigDataDigests)+1) digests := make(map[digest.Digest]struct{}) if i.Digest != "" { if err := i.Digest.Validate(); err != nil { - return errors.Wrapf(err, "error validating image digest %q", string(i.Digest)) + return fmt.Errorf("validating image digest %q: %w", string(i.Digest), err) } digests[i.Digest] = struct{}{} validDigests = append(validDigests, i.Digest) @@ -230,8 +433,8 @@ func (i *Image) recomputeDigests() error { if !bigDataNameIsManifest(name) { continue } - if digest.Validate() != nil { - return errors.Wrapf(digest.Validate(), "error validating digest %q for big data item %q", string(digest), name) + if err := digest.Validate(); err != nil { + return fmt.Errorf("validating digest %q for big data item %q: %w", string(digest), name, err) } // Deduplicate the digest values. if _, known := digests[digest]; !known { @@ -246,65 +449,85 @@ func (i *Image) recomputeDigests() error { return nil } -func (r *imageStore) Load() error { - shouldSave := false +// load reloads the contents of the store from disk. +// +// Most callers should call reloadIfChanged() instead, to avoid overhead and to correctly +// manage r.lastWrite. +// +// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true +// if it is held for writing. +// The caller must hold r.inProcessLock for WRITING. +// +// If !lockedForWriting and this function fails, the return value indicates whether +// retrying with lockedForWriting could succeed. +func (r *imageStore) load(lockedForWriting bool) (bool, error) { rpath := r.imagespath() - data, err := ioutil.ReadFile(rpath) + data, err := os.ReadFile(rpath) if err != nil && !os.IsNotExist(err) { - return err + return false, err } + images := []*Image{} - idlist := []string{} + if len(data) != 0 { + if err := json.Unmarshal(data, &images); err != nil { + return false, fmt.Errorf("loading %q: %w", rpath, err) + } + } + idlist := make([]string, 0, len(images)) ids := make(map[string]*Image) names := make(map[string]*Image) digests := make(map[digest.Digest][]*Image) - if err = json.Unmarshal(data, &images); len(data) == 0 || err == nil { - idlist = make([]string, 0, len(images)) - for n, image := range images { - ids[image.ID] = images[n] - idlist = append(idlist, image.ID) - for _, name := range image.Names { - if conflict, ok := names[name]; ok { - r.removeName(conflict, name) - shouldSave = true - } - } - // Compute the digest list. - err = image.recomputeDigests() - if err != nil { - return errors.Wrapf(err, "error computing digests for image with ID %q (%v)", image.ID, image.Names) - } - for _, name := range image.Names { - names[name] = image - } - for _, digest := range image.Digests { - list := digests[digest] - digests[digest] = append(list, image) + var errorToResolveBySaving error // == nil + for n, image := range images { + ids[image.ID] = images[n] + idlist = append(idlist, image.ID) + for _, name := range image.Names { + if conflict, ok := names[name]; ok { + r.removeName(conflict, name) + errorToResolveBySaving = ErrDuplicateImageNames } - image.ReadOnly = !r.IsReadWrite() } + // Compute the digest list. 
+ if err := image.recomputeDigests(); err != nil { + return false, fmt.Errorf("computing digests for image with ID %q (%v): %w", image.ID, image.Names, err) + } + for _, name := range image.Names { + names[name] = image + } + for _, digest := range image.Digests { + list := digests[digest] + digests[digest] = append(list, image) + } + image.ReadOnly = !r.lockfile.IsReadWrite() } - if shouldSave && (!r.IsReadWrite() || !r.Locked()) { - return ErrDuplicateImageNames + + if errorToResolveBySaving != nil { + if !r.lockfile.IsReadWrite() { + return false, errorToResolveBySaving + } + if !lockedForWriting { + return true, errorToResolveBySaving + } } r.images = images - r.idindex = truncindex.NewTruncIndex(idlist) + r.idindex = truncindex.NewTruncIndex(idlist) // Invalid values in idlist are ignored: they are not a reason to refuse processing the whole store. r.byid = ids r.byname = names r.bydigest = digests - if shouldSave { - return r.Save() + if errorToResolveBySaving != nil { + return false, r.Save() } - return nil + return false, nil } +// Save saves the contents of the store to disk. +// The caller must hold r.lockfile locked for writing. +// The caller must hold r.inProcessLock for reading (but usually holds it for writing in order to make the desired changes). func (r *imageStore) Save() error { - if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the image store at %q", r.imagespath()) - } - if !r.Locked() { - return errors.New("image store is not locked for writing") + if !r.lockfile.IsReadWrite() { + return fmt.Errorf("not allowed to modify the image store at %q: %w", r.imagespath(), ErrStoreIsReadOnly) } + r.lockfile.AssertLockedForWriting() rpath := r.imagespath() if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil { return err @@ -313,55 +536,77 @@ func (r *imageStore) Save() error { if err != nil { return err } - defer r.Touch() - return ioutils.AtomicWriteFile(rpath, jdata, 0600) + if err := ioutils.AtomicWriteFile(rpath, jdata, 0600); err != nil { + return err + } + lw, err := r.lockfile.RecordWrite() + if err != nil { + return err + } + r.lastWrite = lw + return nil } -func newImageStore(dir string) (ImageStore, error) { +func newImageStore(dir string) (rwImageStore, error) { if err := os.MkdirAll(dir, 0700); err != nil { return nil, err } - lockfile, err := GetLockfile(filepath.Join(dir, "images.lock")) + lockfile, err := lockfile.GetLockFile(filepath.Join(dir, "images.lock")) if err != nil { return nil, err } - lockfile.Lock() - defer lockfile.Unlock() istore := imageStore{ lockfile: lockfile, dir: dir, + images: []*Image{}, byid: make(map[string]*Image), byname: make(map[string]*Image), bydigest: make(map[digest.Digest][]*Image), } - if err := istore.Load(); err != nil { + if err := istore.startWritingWithReload(false); err != nil { + return nil, err + } + defer istore.stopWriting() + istore.lastWrite, err = istore.lockfile.GetLastWrite() + if err != nil { + return nil, err + } + if _, err := istore.load(true); err != nil { return nil, err } return &istore, nil } -func newROImageStore(dir string) (ROImageStore, error) { - lockfile, err := GetROLockfile(filepath.Join(dir, "images.lock")) +func newROImageStore(dir string) (roImageStore, error) { + lockfile, err := lockfile.GetROLockFile(filepath.Join(dir, "images.lock")) if err != nil { return nil, err } - lockfile.RLock() - defer lockfile.Unlock() istore := imageStore{ lockfile: lockfile, dir: dir, + images: []*Image{}, byid: make(map[string]*Image), byname: 
make(map[string]*Image), bydigest: make(map[digest.Digest][]*Image), } - if err := istore.Load(); err != nil { + if err := istore.startReadingWithReload(false); err != nil { + return nil, err + } + defer istore.stopReading() + istore.lastWrite, err = istore.lockfile.GetLastWrite() + if err != nil { + return nil, err + } + if _, err := istore.load(false); err != nil { return nil, err } return &istore, nil } +// Requires startReading or startWriting. func (r *imageStore) lookup(id string) (*Image, bool) { if image, ok := r.byid[id]; ok { return image, ok @@ -374,25 +619,27 @@ func (r *imageStore) lookup(id string) (*Image, bool) { return nil, false } +// Requires startWriting. func (r *imageStore) ClearFlag(id string, flag string) error { - if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to clear flags on images at %q", r.imagespath()) + if !r.lockfile.IsReadWrite() { + return fmt.Errorf("not allowed to clear flags on images at %q: %w", r.imagespath(), ErrStoreIsReadOnly) } image, ok := r.lookup(id) if !ok { - return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } delete(image.Flags, flag) return r.Save() } +// Requires startWriting. func (r *imageStore) SetFlag(id string, flag string, value interface{}) error { - if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to set flags on images at %q", r.imagespath()) + if !r.lockfile.IsReadWrite() { + return fmt.Errorf("not allowed to set flags on images at %q: %w", r.imagespath(), ErrStoreIsReadOnly) } image, ok := r.lookup(id) if !ok { - return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } if image.Flags == nil { image.Flags = make(map[string]interface{}) @@ -401,9 +648,10 @@ func (r *imageStore) SetFlag(id string, flag string, value interface{}) error { return r.Save() } +// Requires startWriting. 
func (r *imageStore) Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (image *Image, err error) { - if !r.IsReadWrite() { - return nil, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new images at %q", r.imagespath()) + if !r.lockfile.IsReadWrite() { + return nil, fmt.Errorf("not allowed to create new images at %q: %w", r.imagespath(), ErrStoreIsReadOnly) } if id == "" { id = stringid.GenerateRandomID() @@ -414,59 +662,62 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c } } if _, idInUse := r.byid[id]; idInUse { - return nil, errors.Wrapf(ErrDuplicateID, "an image with ID %q already exists", id) + return nil, fmt.Errorf("an image with ID %q already exists: %w", id, ErrDuplicateID) } names = dedupeNames(names) for _, name := range names { if image, nameInUse := r.byname[name]; nameInUse { - return nil, errors.Wrapf(ErrDuplicateName, "image name %q is already associated with image %q", name, image.ID) + return nil, fmt.Errorf("image name %q is already associated with image %q: %w", name, image.ID, ErrDuplicateName) } } if created.IsZero() { created = time.Now().UTC() } - if err == nil { - image = &Image{ - ID: id, - Digest: searchableDigest, - Digests: nil, - Names: names, - TopLayer: layer, - Metadata: metadata, - BigDataNames: []string{}, - BigDataSizes: make(map[string]int64), - BigDataDigests: make(map[string]digest.Digest), - Created: created, - Flags: make(map[string]interface{}), - } - err := image.recomputeDigests() - if err != nil { - return nil, errors.Wrapf(err, "error validating digests for new image") - } - r.images = append(r.images, image) - r.idindex.Add(id) - r.byid[id] = image - for _, name := range names { - r.byname[name] = image - } - for _, digest := range image.Digests { - list := r.bydigest[digest] - r.bydigest[digest] = append(list, image) - } - err = r.Save() - image = copyImage(image) + + image = &Image{ + ID: id, + Digest: searchableDigest, + Digests: nil, + Names: names, + TopLayer: layer, + Metadata: metadata, + BigDataNames: []string{}, + BigDataSizes: make(map[string]int64), + BigDataDigests: make(map[string]digest.Digest), + Created: created, + Flags: make(map[string]interface{}), + } + err = image.recomputeDigests() + if err != nil { + return nil, fmt.Errorf("validating digests for new image: %w", err) } + r.images = append(r.images, image) + // This can only fail on duplicate IDs, which shouldn’t happen — and in that case the index is already in the desired state anyway. + // Implementing recovery from an unlikely and unimportant failure here would be too risky. + _ = r.idindex.Add(id) + r.byid[id] = image + for _, name := range names { + r.byname[name] = image + } + for _, digest := range image.Digests { + list := r.bydigest[digest] + r.bydigest[digest] = append(list, image) + } + err = r.Save() + image = copyImage(image) return image, err } +// Requires startWriting. func (r *imageStore) addMappedTopLayer(id, layer string) error { if image, ok := r.lookup(id); ok { image.MappedTopLayers = append(image.MappedTopLayers, layer) return r.Save() } - return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } +// Requires startWriting. 
func (r *imageStore) removeMappedTopLayer(id, layer string) error { if image, ok := r.lookup(id); ok { initialLen := len(image.MappedTopLayers) @@ -477,64 +728,75 @@ func (r *imageStore) removeMappedTopLayer(id, layer string) error { } return r.Save() } - return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } +// Requires startReading or startWriting. func (r *imageStore) Metadata(id string) (string, error) { if image, ok := r.lookup(id); ok { return image.Metadata, nil } - return "", errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } +// Requires startWriting. func (r *imageStore) SetMetadata(id, metadata string) error { - if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify image metadata at %q", r.imagespath()) + if !r.lockfile.IsReadWrite() { + return fmt.Errorf("not allowed to modify image metadata at %q: %w", r.imagespath(), ErrStoreIsReadOnly) } if image, ok := r.lookup(id); ok { image.Metadata = metadata return r.Save() } - return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } +// The caller must hold r.inProcessLock for writing. func (r *imageStore) removeName(image *Image, name string) { image.Names = stringSliceWithoutValue(image.Names, name) } +// The caller must hold r.inProcessLock for writing. func (i *Image) addNameToHistory(name string) { i.NamesHistory = dedupeNames(append([]string{name}, i.NamesHistory...)) } -func (r *imageStore) SetNames(id string, names []string) error { - if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change image name assignments at %q", r.imagespath()) +// Requires startWriting. +func (r *imageStore) updateNames(id string, names []string, op updateNameOperation) error { + if !r.lockfile.IsReadWrite() { + return fmt.Errorf("not allowed to change image name assignments at %q: %w", r.imagespath(), ErrStoreIsReadOnly) } - names = dedupeNames(names) - if image, ok := r.lookup(id); ok { - for _, name := range image.Names { - delete(r.byname, name) - } - for _, name := range names { - if otherImage, ok := r.byname[name]; ok { - r.removeName(otherImage, name) - } - r.byname[name] = image - image.addNameToHistory(name) + image, ok := r.lookup(id) + if !ok { + return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) + } + oldNames := image.Names + names, err := applyNameOperation(oldNames, names, op) + if err != nil { + return err + } + for _, name := range oldNames { + delete(r.byname, name) + } + for _, name := range names { + if otherImage, ok := r.byname[name]; ok { + r.removeName(otherImage, name) } - image.Names = names - return r.Save() + r.byname[name] = image + image.addNameToHistory(name) } - return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + image.Names = names + return r.Save() } +// Requires startWriting. 
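The updateNames method above delegates the actual list manipulation to applyNameOperation, which is defined elsewhere in the package and not shown in this hunk. A hedged sketch of what such a helper plausibly looks like; the type and constant names below (nameOp, setNamesOp, addNamesOp, removeNamesOp) are placeholders, not taken from this diff:

// Placeholder sketch only: the real updateNameOperation values and
// applyNameOperation live elsewhere in c/storage and may differ.
type nameOp int

const (
	setNamesOp    nameOp = iota // replace the name list wholesale
	addNamesOp                  // append the new names
	removeNamesOp               // drop the listed names
)

func applyNameOp(oldNames, args []string, op nameOp) ([]string, error) {
	switch op {
	case setNamesOp:
		return dedupeNames(args), nil
	case addNamesOp:
		return dedupeNames(append(append([]string{}, oldNames...), args...)), nil
	case removeNamesOp:
		drop := make(map[string]struct{}, len(args))
		for _, a := range args {
			drop[a] = struct{}{}
		}
		kept := make([]string, 0, len(oldNames))
		for _, n := range oldNames {
			if _, ok := drop[n]; !ok {
				kept = append(kept, n)
			}
		}
		return kept, nil
	default:
		return nil, fmt.Errorf("unknown name operation %d", op)
	}
}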
func (r *imageStore) Delete(id string) error { - if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete images at %q", r.imagespath()) + if !r.lockfile.IsReadWrite() { + return fmt.Errorf("not allowed to delete images at %q: %w", r.imagespath(), ErrStoreIsReadOnly) } image, ok := r.lookup(id) if !ok { - return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } id = image.ID toDeleteIndex := -1 @@ -544,7 +806,9 @@ func (r *imageStore) Delete(id string) error { } } delete(r.byid, id) - r.idindex.Delete(id) + // This can only fail if the ID is already missing, which shouldn’t happen — and in that case the index is already in the desired state anyway. + // The store’s Delete method is used on various paths to recover from failures, so this should be robust against partially missing data. + _ = r.idindex.Delete(id) for _, name := range image.Names { delete(r.byname, name) } @@ -573,55 +837,50 @@ func (r *imageStore) Delete(id string) error { return nil } +// Requires startReading or startWriting. func (r *imageStore) Get(id string) (*Image, error) { if image, ok := r.lookup(id); ok { return copyImage(image), nil } - return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) -} - -func (r *imageStore) Lookup(name string) (id string, err error) { - if image, ok := r.lookup(name); ok { - return image.ID, nil - } - return "", errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } +// Requires startReading or startWriting. func (r *imageStore) Exists(id string) bool { _, ok := r.lookup(id) return ok } +// Requires startReading or startWriting. func (r *imageStore) ByDigest(d digest.Digest) ([]*Image, error) { if images, ok := r.bydigest[d]; ok { return copyImageSlice(images), nil } - return nil, errors.Wrapf(ErrImageUnknown, "error locating image with digest %q", d) + return nil, fmt.Errorf("locating image with digest %q: %w", d, ErrImageUnknown) } +// Requires startReading or startWriting. func (r *imageStore) BigData(id, key string) ([]byte, error) { if key == "" { - return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve image big data value for empty name") + return nil, fmt.Errorf("can't retrieve image big data value for empty name: %w", ErrInvalidBigDataName) } image, ok := r.lookup(id) if !ok { - return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } - return ioutil.ReadFile(r.datapath(image.ID, key)) + return os.ReadFile(r.datapath(image.ID, key)) } +// Requires startReading or startWriting. func (r *imageStore) BigDataSize(id, key string) (int64, error) { if key == "" { - return -1, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve size of image big data with empty name") + return -1, fmt.Errorf("can't retrieve size of image big data with empty name: %w", ErrInvalidBigDataName) } image, ok := r.lookup(id) if !ok { - return -1, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) - } - if image.BigDataSizes == nil { - image.BigDataSizes = make(map[string]int64) + return -1, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } - if size, ok := image.BigDataSizes[key]; ok { + if size, ok := image.BigDataSizes[key]; ok { // This is valid, and returns ok == false, for BigDataSizes == nil. 
return size, nil } if data, err := r.BigData(id, key); err == nil && data != nil { @@ -630,27 +889,26 @@ func (r *imageStore) BigDataSize(id, key string) (int64, error) { return -1, ErrSizeUnknown } +// Requires startReading or startWriting. func (r *imageStore) BigDataDigest(id, key string) (digest.Digest, error) { if key == "" { - return "", errors.Wrapf(ErrInvalidBigDataName, "can't retrieve digest of image big data value with empty name") + return "", fmt.Errorf("can't retrieve digest of image big data value with empty name: %w", ErrInvalidBigDataName) } image, ok := r.lookup(id) if !ok { - return "", errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } - if image.BigDataDigests == nil { - image.BigDataDigests = make(map[string]digest.Digest) - } - if d, ok := image.BigDataDigests[key]; ok { + if d, ok := image.BigDataDigests[key]; ok { // This is valid, and returns ok == false, for BigDataDigests == nil. return d, nil } return "", ErrDigestUnknown } +// Requires startReading or startWriting. func (r *imageStore) BigDataNames(id string) ([]string, error) { image, ok := r.lookup(id) if !ok { - return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } return copyStringSlice(image.BigDataNames), nil } @@ -666,16 +924,17 @@ func imageSliceWithoutValue(slice []*Image, value *Image) []*Image { return modified } +// Requires startWriting. func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error { if key == "" { - return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for image big data item") + return fmt.Errorf("can't set empty name for image big data item: %w", ErrInvalidBigDataName) } - if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to save data items associated with images at %q", r.imagespath()) + if !r.lockfile.IsReadWrite() { + return fmt.Errorf("not allowed to save data items associated with images at %q: %w", r.imagespath(), ErrStoreIsReadOnly) } image, ok := r.lookup(id) if !ok { - return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } err := os.MkdirAll(r.datadir(image.ID), 0700) if err != nil { @@ -684,10 +943,10 @@ func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func var newDigest digest.Digest if bigDataNameIsManifest(key) { if digestManifest == nil { - return errors.Wrapf(ErrDigestUnknown, "error digesting manifest: no manifest digest callback provided") + return fmt.Errorf("digesting manifest: no manifest digest callback provided: %w", ErrDigestUnknown) } if newDigest, err = digestManifest(data); err != nil { - return errors.Wrapf(err, "error digesting manifest") + return fmt.Errorf("digesting manifest: %w", err) } } else { newDigest = digest.Canonical.FromBytes(data) @@ -731,7 +990,7 @@ func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func } } if err = image.recomputeDigests(); err != nil { - return errors.Wrapf(err, "error loading recomputing image digest information for %s", image.ID) + return fmt.Errorf("recomputing image digest information for %s: %w", image.ID, err) } for _, newDigest := range image.Digests { // add the image to the list of images in the digest-based index which @@ -750,9 +1009,10 @@ func (r
*imageStore) SetBigData(id, key string, data []byte, digestManifest func return err } +// Requires startWriting. func (r *imageStore) Wipe() error { - if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete images at %q", r.imagespath()) + if !r.lockfile.IsReadWrite() { + return fmt.Errorf("not allowed to delete images at %q: %w", r.imagespath(), ErrStoreIsReadOnly) } ids := make([]string, 0, len(r.byid)) for id := range r.byid { @@ -765,50 +1025,3 @@ func (r *imageStore) Wipe() error { } return nil } - -func (r *imageStore) Lock() { - r.lockfile.Lock() -} - -func (r *imageStore) RecursiveLock() { - r.lockfile.RecursiveLock() -} - -func (r *imageStore) RLock() { - r.lockfile.RLock() -} - -func (r *imageStore) Unlock() { - r.lockfile.Unlock() -} - -func (r *imageStore) Touch() error { - return r.lockfile.Touch() -} - -func (r *imageStore) Modified() (bool, error) { - return r.lockfile.Modified() -} - -func (r *imageStore) IsReadWrite() bool { - return r.lockfile.IsReadWrite() -} - -func (r *imageStore) TouchedSince(when time.Time) bool { - return r.lockfile.TouchedSince(when) -} - -func (r *imageStore) Locked() bool { - return r.lockfile.Locked() -} - -func (r *imageStore) ReloadIfChanged() error { - r.loadMut.Lock() - defer r.loadMut.Unlock() - - modified, err := r.Modified() - if err == nil && modified { - return r.Load() - } - return err -} diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go index 9b05474bb..f14108be5 100644 --- a/vendor/github.com/containers/storage/layers.go +++ b/vendor/github.com/containers/storage/layers.go @@ -2,9 +2,9 @@ package storage import ( "bytes" + "errors" "fmt" "io" - "io/ioutil" "os" "path" "path/filepath" @@ -18,15 +18,17 @@ import ( "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/lockfile" "github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/stringid" "github.com/containers/storage/pkg/system" "github.com/containers/storage/pkg/tarlog" "github.com/containers/storage/pkg/truncindex" + multierror "github.com/hashicorp/go-multierror" "github.com/klauspost/pgzip" digest "github.com/opencontainers/go-digest" - "github.com/opencontainers/selinux/go-selinux/label" - "github.com/pkg/errors" + "github.com/opencontainers/selinux/go-selinux" + "github.com/sirupsen/logrus" "github.com/vbatts/tar-split/archive/tar" "github.com/vbatts/tar-split/tar/asm" "github.com/vbatts/tar-split/tar/storage" @@ -35,8 +37,28 @@ import ( const ( tarSplitSuffix = ".tar-split.gz" incompleteFlag = "incomplete" + // maxLayerStoreCleanupIterations is the number of times we try to clean up inconsistent layer store state + // in readers (which, for implementation reasons, gives other writers the opportunity to create more inconsistent state) + // until we just give up. + maxLayerStoreCleanupIterations = 3 ) +type layerLocations uint8 + +// The backing store is split into two JSON files: the volatile one +// is written without fsync(), so it is less robust to +// unclean shutdown. +const ( + stableLayerLocation layerLocations = 1 << iota + volatileLayerLocation + + numLayerLocationIndex = iota +) + +func layerLocationFromIndex(index int) layerLocations { + return 1 << index +} + // A Layer is a record of a copy-on-write layer that's stored by the lower // level graph driver.
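The new layerLocations values double as both a bitmask (for tracking which locations were modified) and an array index into the per-location JSON paths. A small in-package loop, illustrative only, showing the index-to-bit mapping:

// Illustrative only: index 0 maps to stableLayerLocation (binary 1) and
// index 1 to volatileLayerLocation (binary 10); numLayerLocationIndex == 2.
func dumpLayerLocations() {
	for locationIndex := 0; locationIndex < numLayerLocationIndex; locationIndex++ {
		location := layerLocationFromIndex(locationIndex)
		fmt.Printf("index %d -> bit %b, volatile=%v\n",
			locationIndex, location, location == volatileLayerLocation)
	}
}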
type Layer struct { @@ -61,13 +83,26 @@ type Layer struct { MountLabel string `json:"mountlabel,omitempty"` // MountPoint is the path where the layer is mounted, or where it was most - // recently mounted. This can change between subsequent Unmount() and - // Mount() calls, so the caller should consult this value after Mount() - // succeeds to find the location of the container's root filesystem. + // recently mounted. + // + // WARNING: This field is a snapshot in time: (except for users inside c/storage that + // hold the mount lock) the true value can change between subsequent + // calls to c/storage API. + // + // Users that need to handle concurrent mount/unmount attempts should not access this + // field at all, and should only use the path returned by .Mount() (and that’s only + // assuming no other user will concurrently decide to unmount that mount point). MountPoint string `json:"-"` // MountCount is used as a reference count for the container's layer being // mounted at the mount point. + // + // WARNING: This field is a snapshot in time; (except for users inside c/storage that + // hold the mount lock) the true value can change between subsequent + // calls to c/storage API. + // + // In situations where concurrent mount/unmount attempts can happen, this field + // should not be used for any decisions, maybe apart from heuristic user warnings. MountCount int `json:"-"` // Created is the datestamp for when this layer was created. Older @@ -118,6 +153,9 @@ type Layer struct { // ReadOnly is true if this layer resides in a read-only layer store. ReadOnly bool `json:"-"` + // volatileStore is true if the layer is from the volatile JSON file + volatileStore bool `json:"-"` + // BigDataNames is a list of names of data items that we keep for the // convenience of the caller. They can be large, and are only in // memory when being read from or written to disk. @@ -136,13 +174,19 @@ type DiffOptions struct { Compression *archive.Compression } -// ROLayerStore wraps a graph driver, adding the ability to refer to layers by +// roLayerStore wraps a graph driver, adding the ability to refer to layers by // name, and keeping track of parent-child relationships, along with a list of // all known layers. -type ROLayerStore interface { - ROFileBasedStore - ROMetadataStore - ROLayerBigDataStore +type roLayerStore interface { + roMetadataStore + roLayerBigDataStore + + // startReading makes sure the store is fresh, and locks it for reading. + // If this succeeds, the caller MUST call stopReading(). + startReading() error + + // stopReading releases locks obtained by startReading. + stopReading() // Exists checks if a layer with the specified name or ID is known. Exists(id string) bool @@ -176,10 +220,6 @@ type ROLayerStore interface { // found, it returns an error. Size(name string) (int64, error) - // Lookup attempts to translate a name to an ID. Most methods do this - // implicitly. - Lookup(name string) (string, error) - // LayersByCompressedDigest returns a slice of the layers with the // specified compressed digest value recorded for them. LayersByCompressedDigest(d digest.Digest) ([]Layer, error) @@ -192,15 +232,21 @@ type ROLayerStore interface { Layers() ([]Layer, error) } -// LayerStore wraps a graph driver, adding the ability to refer to layers by +// rwLayerStore wraps a graph driver, adding the ability to refer to layers by // name, and keeping track of parent-child relationships, along with a list of // all known layers.
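Given the MountPoint/MountCount caveats above, the only mount path a caller may rely on is the one returned by Mount() itself; the updated Mount() and the new unmount() signatures appear in the interface just below. An illustrative in-package sketch of that pattern, with the startWriting()/stopWriting() bookkeeping elided for brevity:

// Illustrative only: trust the path returned by Mount(), never a cached
// layer.MountPoint snapshot; locking via startWriting() is elided here.
func withMountedLayer(s rwLayerStore, id string, use func(path string) error) error {
	path, err := s.Mount(id, drivers.MountOpts{})
	if err != nil {
		return err
	}
	defer func() {
		// Both results are deliberately ignored: per the WARNING on unmount(),
		// the returned "still mounted" flag is only a heuristic snapshot.
		_, _ = s.unmount(id, false, false) // we just mounted it, so we may promise it is mounted
	}()
	return use(path)
}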
-type LayerStore interface { - ROLayerStore - RWFileBasedStore - RWMetadataStore - FlaggableStore - RWLayerBigDataStore +type rwLayerStore interface { + roLayerStore + rwMetadataStore + flaggableStore + rwLayerBigDataStore + + // startWriting makes sure the store is fresh, and locks it for writing. + // If this succeeds, the caller MUST call stopWriting(). + startWriting() error + + // stopWriting releases locks obtained by startWriting. + stopWriting() // Create creates a new layer, optionally giving it a specified ID rather than // a randomly-generated one, either inheriting data from another specified @@ -217,9 +263,8 @@ type LayerStore interface { // Put combines the functions of CreateWithFlags and ApplyDiff. Put(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error) - // SetNames replaces the list of names associated with a layer with the - // supplied values. - SetNames(id string, names []string) error + // updateNames modifies names associated with a layer based on (op, names). + updateNames(id string, names []string, op updateNameOperation) error // Delete deletes a layer with the specified name or ID. Delete(id string) error @@ -233,8 +278,15 @@ type LayerStore interface { // The mappings used by the container can be specified. Mount(id string, options drivers.MountOpts) (string, error) - // Unmount unmounts a layer when it is no longer in use. - Unmount(id string, force bool) (bool, error) + // unmount unmounts a layer when it is no longer in use. + // If conditional is set, it will fail with ErrLayerNotMounted if the layer is not mounted (without conditional, the caller is + // making a promise that the layer is actually mounted). + // If force is set, it will physically try to unmount it even if it is mounted multiple times, or even if (!conditional and) + // there are no records of it being mounted in the first place. + // It returns whether the layer was still mounted at the time this function returned. + // WARNING: The return value may already be obsolete by the time it is available + // to the caller, so it can be used for heuristic sanity checks at best. It should almost always be ignored. + unmount(id string, force bool, conditional bool) (bool, error) // Mounted returns number of times the layer has been mounted. Mounted(id string) (int, error) @@ -260,32 +312,52 @@ type LayerStore interface { // DifferTarget gets the location where files are stored for the layer. DifferTarget(id string) (string, error) - // LoadLocked wraps Load in a locked state. This means it loads the store - // and cleans-up invalid layers if needed. - LoadLocked() error - // PutAdditionalLayer creates a layer using the diff contained in the additional layer // store. // This API is experimental and can be changed without bumping the major version number. PutAdditionalLayer(id string, parentLayer *Layer, names []string, aLayer drivers.AdditionalLayer) (layer *Layer, err error) + + // Clean up unreferenced layers + GarbageCollect() error + + // supportsShifting() returns true if the underlying driver.Driver.SupportsShifting() does.
+ supportsShifting() bool } type layerStore struct { - lockfile Locker - mountsLockfile Locker - rundir string - driver drivers.Driver - layerdir string - layers []*Layer - idindex *truncindex.TruncIndex - byid map[string]*Layer - byname map[string]*Layer - bymount map[string]*Layer - bycompressedsum map[digest.Digest][]string - byuncompressedsum map[digest.Digest][]string - uidMap []idtools.IDMap - gidMap []idtools.IDMap - loadMut sync.Mutex + // The following fields are only set when constructing layerStore, and must never be modified afterwards. + // They are safe to access without any other locking. + lockfile *lockfile.LockFile // lockfile.IsReadWrite can be used to distinguish between read-write and read-only layer stores. + mountsLockfile *lockfile.LockFile // Can _only_ be obtained with inProcessLock held. + rundir string + jsonPath [numLayerLocationIndex]string + layerdir string + + inProcessLock sync.RWMutex // Can _only_ be obtained with lockfile held. + // The following fields can only be read/written with read/write ownership of inProcessLock, respectively. + // Almost all users should use startReading() or startWriting(). + lastWrite lockfile.LastWrite + mountsLastWrite lockfile.LastWrite // Only valid if lockfile.IsReadWrite() + layers []*Layer + idindex *truncindex.TruncIndex + byid map[string]*Layer + byname map[string]*Layer + bymount map[string]*Layer + bycompressedsum map[digest.Digest][]string + byuncompressedsum map[digest.Digest][]string + layerspathsModified [numLayerLocationIndex]time.Time + + // FIXME: This field is only set when constructing layerStore, but locking rules of the driver + // interface itself are not documented here. + driver drivers.Driver +} + +// The caller must hold r.inProcessLock for reading. +func layerLocation(l *Layer) layerLocations { + if l.volatileStore { + return volatileLayerLocation + } + return stableLayerLocation } func copyLayer(l *Layer) *Layer { @@ -304,6 +376,7 @@ func copyLayer(l *Layer) *Layer { UncompressedSize: l.UncompressedSize, CompressionType: l.CompressionType, ReadOnly: l.ReadOnly, + volatileStore: l.volatileStore, BigDataNames: copyStringSlice(l.BigDataNames), Flags: copyStringInterfaceMap(l.Flags), UIDMap: copyIDMap(l.UIDMap), @@ -313,6 +386,271 @@ func copyLayer(l *Layer) *Layer { } } +// startWritingWithReload makes sure the store is fresh if canReload, and locks it for writing. +// If this succeeds, the caller MUST call stopWriting(). +// +// This is an internal implementation detail of layerStore construction, every other caller +// should use startWriting() instead. +func (r *layerStore) startWritingWithReload(canReload bool) error { + r.lockfile.Lock() + r.inProcessLock.Lock() + succeeded := false + defer func() { + if !succeeded { + r.inProcessLock.Unlock() + r.lockfile.Unlock() + } + }() + + if canReload { + if _, err := r.reloadIfChanged(true); err != nil { + return err + } + } + + succeeded = true + return nil +} + +// startWriting makes sure the store is fresh, and locks it for writing. +// If this succeeds, the caller MUST call stopWriting(). +func (r *layerStore) startWriting() error { + return r.startWritingWithReload(true) +} + +// stopWriting releases locks obtained by startWriting. +func (r *layerStore) stopWriting() { + r.inProcessLock.Unlock() + r.lockfile.Unlock() +} + +// startReadingWithReload makes sure the store is fresh if canReload, and locks it for reading. +// If this succeeds, the caller MUST call stopReading(). 
+// +// This is an internal implementation detail of layerStore construction, every other caller +// should use startReading() instead. +func (r *layerStore) startReadingWithReload(canReload bool) error { + // inProcessLocked calls the nested function with r.inProcessLock held for writing. + inProcessLocked := func(fn func() error) error { + r.inProcessLock.Lock() + defer r.inProcessLock.Unlock() + return fn() + } + + r.lockfile.RLock() + unlockFn := r.lockfile.Unlock // A function to call to clean up, or nil + defer func() { + if unlockFn != nil { + unlockFn() + } + }() + r.inProcessLock.RLock() + unlockFn = r.stopReading + + if canReload { + // If we are lucky, we can just hold the read locks, check that we are fresh, and continue. + modified, err := r.modified() + if err != nil { + return err + } + if modified { + // We are unlucky, and need to reload. + // NOTE: Multiple goroutines can get to this place approximately simultaneously. + r.inProcessLock.RUnlock() + unlockFn = r.lockfile.Unlock + + cleanupsDone := 0 + for { + // First try reloading with r.lockfile held for reading. + // r.inProcessLock will serialize all goroutines that got here; + // each will re-check on-disk state vs. r.lastWrite, and the first one will actually reload the data. + var tryLockedForWriting bool + err := inProcessLocked(func() error { + var err error + tryLockedForWriting, err = r.reloadIfChanged(false) + return err + }) + if err == nil { + break + } + if !tryLockedForWriting { + return err + } + if cleanupsDone >= maxLayerStoreCleanupIterations { + return fmt.Errorf("(even after %d cleanup attempts:) %w", cleanupsDone, err) + } + // Not good enough, we need r.lockfile held for writing. So, let’s do that. + unlockFn() + unlockFn = nil + + r.lockfile.Lock() + unlockFn = r.lockfile.Unlock + if err := inProcessLocked(func() error { + _, err := r.reloadIfChanged(true) + return err + }); err != nil { + return err + } + unlockFn() + unlockFn = nil + + r.lockfile.RLock() + unlockFn = r.lockfile.Unlock + // We need to check for a reload again because the on-disk state could have been modified + // after we released the lock. + cleanupsDone++ + } + + // NOTE that we hold neither a read nor write inProcessLock at this point. That’s fine in ordinary operation, because + // the on-filesystem r.lockfile should protect us against (cooperating) writers, and any use of r.inProcessLock + // protects us against in-process writers modifying data. + // In presence of non-cooperating writers, we just ensure that 1) the in-memory data is not clearly out-of-date + // and 2) access to the in-memory data is not racy; + // but we can’t protect against those out-of-process writers modifying _files_ while we are assuming they are in a consistent state. + + r.inProcessLock.RLock() + } + } + + unlockFn = nil + return nil +} + +// startReading makes sure the store is fresh, and locks it for reading. +// If this succeeds, the caller MUST call stopReading(). +func (r *layerStore) startReading() error { + return r.startReadingWithReload(true) +} + +// stopReading releases locks obtained by startReading. +func (r *layerStore) stopReading() { + r.inProcessLock.RUnlock() + r.lockfile.Unlock() +} + +// modified returns true if the on-disk state (of layers or mounts) has changed (i.e. if reloadIfChanged may need to modify the store). +// +// Note that unlike containerStore.modified and imageStore.modified, this function is not directly used in layerStore.reloadIfChanged(); +// it exists only to help the reader ensure it has fresh enough state.
+// +// The caller must hold r.lockfile for reading _or_ writing. +// The caller must hold r.inProcessLock for reading or writing. +func (r *layerStore) modified() (bool, error) { + _, m, err := r.layersModified() + if err != nil { + return false, err + } + if m { + return true, nil + } + if r.lockfile.IsReadWrite() { + // This means we get, release, and re-obtain r.mountsLockfile if we actually need to do any kind of reload. + // That’s a bit expensive, but hopefully most callers will be read-only and see no changes. + // We can’t eliminate these mountsLockfile accesses given the current assumption that Layer objects have _some_ not-very-obsolete + // mount data. Maybe we can segregate the mount-dependent and mount-independent operations better... + r.mountsLockfile.RLock() + defer r.mountsLockfile.Unlock() + _, m, err := r.mountsModified() + if err != nil { + return false, err + } + if m { + return true, nil + } + } + return false, nil +} + +// layersModified() checks if the most recent writer to r.jsonPath[] was a party other than the +// last recorded writer. If so, it returns a lockfile.LastWrite value to record on a successful +// reload. +// It should only be called with the lock held. +// The caller must hold r.inProcessLock for reading. +func (r *layerStore) layersModified() (lockfile.LastWrite, bool, error) { + lastWrite, modified, err := r.lockfile.ModifiedSince(r.lastWrite) + if err != nil { + return lockfile.LastWrite{}, modified, err + } + if modified { + return lastWrite, true, nil + } + + // If the layers.json file or container-layers.json has been + // modified manually, then we have to reload the storage in + // any case. + for locationIndex := 0; locationIndex < numLayerLocationIndex; locationIndex++ { + info, err := os.Stat(r.jsonPath[locationIndex]) + if err != nil && !os.IsNotExist(err) { + return lockfile.LastWrite{}, false, fmt.Errorf("stat layers file: %w", err) + } + if info != nil && info.ModTime() != r.layerspathsModified[locationIndex] { + // In this case the LastWrite value is equal to r.lastWrite; writing it back doesn’t hurt. + return lastWrite, true, nil + } + } + + return lockfile.LastWrite{}, false, nil +} + +// reloadIfChanged reloads the contents of the store from disk if it is changed. +// +// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true +// if it is held for writing. +// +// The caller must hold r.inProcessLock for WRITING. +// +// If !lockedForWriting and this function fails, the return value indicates whether +// reloadIfChanged() with lockedForWriting could succeed. +func (r *layerStore) reloadIfChanged(lockedForWriting bool) (bool, error) { + lastWrite, layersModified, err := r.layersModified() + if err != nil { + return false, err + } + if layersModified { + // r.load also reloads mounts data; so, on this path, we don’t need to call reloadMountsIfChanged. + if tryLockedForWriting, err := r.load(lockedForWriting); err != nil { + return tryLockedForWriting, err // r.lastWrite is unchanged, so we will load the next time again. + } + r.lastWrite = lastWrite + return false, nil + } + if r.lockfile.IsReadWrite() { + r.mountsLockfile.RLock() + defer r.mountsLockfile.Unlock() + if err := r.reloadMountsIfChanged(); err != nil { + return false, err + } + } + return false, nil +} + +// mountsModified returns true if the on-disk mount state has changed (i.e. if reloadMountsIfChanged may need to modify the store), +// and a lockfile.LastWrite value for that update.
+// +// The caller must hold r.mountsLockfile for reading _or_ writing. +// The caller must hold r.inProcessLock for reading or writing. +func (r *layerStore) mountsModified() (lockfile.LastWrite, bool, error) { + return r.mountsLockfile.ModifiedSince(r.mountsLastWrite) +} + +// reloadMountsIfChanged reloads the contents of mountsPath from disk if it is changed. +// +// The caller must hold r.mountsLockFile for reading or writing. +func (r *layerStore) reloadMountsIfChanged() error { + lastWrite, modified, err := r.mountsModified() + if err != nil { + return err + } + if modified { + if err = r.loadMounts(); err != nil { + return err + } + r.mountsLastWrite = lastWrite + } + return nil +} + +// Requires startReading or startWriting. func (r *layerStore) Layers() ([]Layer, error) { layers := make([]Layer, len(r.layers)) for i := range r.layers { @@ -321,110 +659,209 @@ func (r *layerStore) Layers() ([]Layer, error) { return layers, nil } +// Requires startWriting. +func (r *layerStore) GarbageCollect() error { + layers, err := r.driver.ListLayers() + + if err != nil { + if errors.Is(err, drivers.ErrNotSupported) { + return nil + } + return err + } + + for _, id := range layers { + // Is the id still referenced + if r.byid[id] != nil { + continue + } + + // Remove layer and any related data of unreferenced id + if err := r.driver.Remove(id); err != nil { + return err + } + + os.Remove(r.tspath(id)) + os.RemoveAll(r.datadir(id)) + } + return nil +} + func (r *layerStore) mountspath() string { return filepath.Join(r.rundir, "mountpoints.json") } -func (r *layerStore) layerspath() string { - return filepath.Join(r.layerdir, "layers.json") -} +// load reloads the contents of the store from disk. +// +// Most callers should call reloadIfChanged() instead, to avoid overhead and to correctly +// manage r.lastWrite. +// +// As a side effect, this sets r.mountsLastWrite. +// +// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true +// if it is held for writing. +// The caller must hold r.inProcessLock for WRITING. +// +// If !lockedForWriting and this function fails, the return value indicates whether +// retrying with lockedForWriting could succeed. 
+func (r *layerStore) load(lockedForWriting bool) (bool, error) {
+	var modifiedLocations layerLocations
 
-func (r *layerStore) Load() error {
-	shouldSave := false
-	rpath := r.layerspath()
-	data, err := ioutil.ReadFile(rpath)
-	if err != nil && !os.IsNotExist(err) {
-		return err
-	}
 	layers := []*Layer{}
-	idlist := []string{}
 	ids := make(map[string]*Layer)
-	names := make(map[string]*Layer)
-	compressedsums := make(map[digest.Digest][]string)
-	uncompressedsums := make(map[digest.Digest][]string)
-	if r.IsReadWrite() {
-		label.ClearLabels()
-	}
-	if err = json.Unmarshal(data, &layers); len(data) == 0 || err == nil {
-		idlist = make([]string, 0, len(layers))
-		for n, layer := range layers {
-			ids[layer.ID] = layers[n]
-			idlist = append(idlist, layer.ID)
-			for _, name := range layer.Names {
-				if conflict, ok := names[name]; ok {
-					r.removeName(conflict, name)
-					shouldSave = true
-				}
-				names[name] = layers[n]
+
+	for locationIndex := 0; locationIndex < numLayerLocationIndex; locationIndex++ {
+		location := layerLocationFromIndex(locationIndex)
+		rpath := r.jsonPath[locationIndex]
+		info, err := os.Stat(rpath)
+		if err != nil {
+			if !os.IsNotExist(err) {
+				return false, err
 			}
-			if layer.CompressedDigest != "" {
-				compressedsums[layer.CompressedDigest] = append(compressedsums[layer.CompressedDigest], layer.ID)
+		} else {
+			r.layerspathsModified[locationIndex] = info.ModTime()
+		}
+		data, err := os.ReadFile(rpath)
+		if err != nil && !os.IsNotExist(err) {
+			return false, err
+		}
+
+		locationLayers := []*Layer{}
+		if len(data) != 0 {
+			if err := json.Unmarshal(data, &locationLayers); err != nil {
+				return false, fmt.Errorf("loading %q: %w", rpath, err)
 			}
-			if layer.UncompressedDigest != "" {
-				uncompressedsums[layer.UncompressedDigest] = append(uncompressedsums[layer.UncompressedDigest], layer.ID)
+		}
+
+		for _, layer := range locationLayers {
+			// There should be no duplicated ids between json files, but let’s check to be sure
+			if ids[layer.ID] != nil {
+				continue // skip invalid duplicated layer
 			}
-			if layer.MountLabel != "" {
-				label.ReserveLabel(layer.MountLabel)
+			// Remember where the layer came from
+			if location == volatileLayerLocation {
+				layer.volatileStore = true
 			}
-			layer.ReadOnly = !r.IsReadWrite()
+			layers = append(layers, layer)
+			ids[layer.ID] = layer
+		}
+	}
+
+	idlist := make([]string, 0, len(layers))
+	names := make(map[string]*Layer)
+	compressedsums := make(map[digest.Digest][]string)
+	uncompressedsums := make(map[digest.Digest][]string)
+	var errorToResolveBySaving error // == nil; if there are multiple errors, this is one of them.
+	if r.lockfile.IsReadWrite() {
+		selinux.ClearLabels()
+	}
+	for n, layer := range layers {
+		idlist = append(idlist, layer.ID)
+		for _, name := range layer.Names {
+			if conflict, ok := names[name]; ok {
+				r.removeName(conflict, name)
+				errorToResolveBySaving = ErrDuplicateLayerNames
+				modifiedLocations |= layerLocation(conflict)
+			}
+			names[name] = layers[n]
+		}
+		if layer.CompressedDigest != "" {
+			compressedsums[layer.CompressedDigest] = append(compressedsums[layer.CompressedDigest], layer.ID)
+		}
+		if layer.UncompressedDigest != "" {
+			uncompressedsums[layer.UncompressedDigest] = append(uncompressedsums[layer.UncompressedDigest], layer.ID)
+		}
+		if layer.MountLabel != "" {
+			selinux.ReserveLabel(layer.MountLabel)
+		}
+		layer.ReadOnly = !r.lockfile.IsReadWrite()
+		// The r.lockfile.IsReadWrite() condition maintains past practice:
+		// Incomplete layers in a read-only store are not treated as a reason to refuse to use other layers from that store
+		// (OTOH creating child layers on top would probably lead to problems?).
+		// We do remove incomplete layers in read-write stores so that we don’t build on top of them.
+		if layerHasIncompleteFlag(layer) && r.lockfile.IsReadWrite() {
+			errorToResolveBySaving = errors.New("an incomplete layer exists and can't be cleaned up")
 		}
-		err = nil
 	}
-	if shouldSave && (!r.IsReadWrite() || !r.Locked()) {
-		return ErrDuplicateLayerNames
+
+	if errorToResolveBySaving != nil {
+		if !r.lockfile.IsReadWrite() {
+			return false, errorToResolveBySaving
+		}
+		if !lockedForWriting {
+			return true, errorToResolveBySaving
+		}
 	}
 	r.layers = layers
-	r.idindex = truncindex.NewTruncIndex(idlist)
+	r.idindex = truncindex.NewTruncIndex(idlist) // Invalid values in idlist are ignored: they are not a reason to refuse processing the whole store.
 	r.byid = ids
 	r.byname = names
 	r.bycompressedsum = compressedsums
 	r.byuncompressedsum = uncompressedsums
 
 	// Load and merge information about which layers are mounted, and where.
-	if r.IsReadWrite() {
+	if r.lockfile.IsReadWrite() {
 		r.mountsLockfile.RLock()
 		defer r.mountsLockfile.Unlock()
-		if err = r.loadMounts(); err != nil {
-			return err
+		// We need to reload mounts unconditionally, because by creating r.layers from scratch, we have discarded the previous
+		// information, if any. So, obtain a fresh mountsLastWrite value so that we don’t unnecessarily reload the data
+		// afterwards.
+		mountsLastWrite, err := r.mountsLockfile.GetLastWrite()
+		if err != nil {
+			return false, err
+		}
+		if err := r.loadMounts(); err != nil {
+			return false, err
 		}
+		r.mountsLastWrite = mountsLastWrite
+		// NOTE: We will release mountsLockfile when this function returns, so unlike most of the layer data, the
+		// r.layers[].MountPoint, r.layers[].MountCount, and r.bymount values might not reflect
+		// true on-filesystem state already by the time this function returns.
+		// Code that needs the state to be accurate must lock r.mountsLockfile again,
+		// and possibly loadMounts() again.
+	}
 
-	// Last step: as we’re writable, try to remove anything that a previous
+	if errorToResolveBySaving != nil {
+		if !r.lockfile.IsReadWrite() {
+			return false, fmt.Errorf("internal error: layerStore.load has errorToResolveBySaving but !r.lockfile.IsReadWrite")
+		}
+		// Last step: try to remove anything that a previous
 		// user of this storage area marked for deletion but didn't manage to
 		// actually delete.
- if r.Locked() { - for _, layer := range r.layers { - if layer.Flags == nil { - layer.Flags = make(map[string]interface{}) - } - if cleanup, ok := layer.Flags[incompleteFlag]; ok { - if b, ok := cleanup.(bool); ok && b { - err = r.deleteInternal(layer.ID) - if err != nil { - break - } - shouldSave = true - } + var incompleteDeletionErrors error // = nil + for _, layer := range r.layers { + if layer.Flags == nil { + layer.Flags = make(map[string]interface{}) + } + if layerHasIncompleteFlag(layer) { + logrus.Warnf("Found incomplete layer %#v, deleting it", layer.ID) + err := r.deleteInternal(layer.ID) + if err != nil { + // Don't return the error immediately, because deleteInternal does not saveLayers(); + // Even if deleting one incomplete layer fails, call saveLayers() so that other possible successfully + // deleted incomplete layers have their metadata correctly removed. + incompleteDeletionErrors = multierror.Append(incompleteDeletionErrors, + fmt.Errorf("deleting layer %#v: %w", layer.ID, err)) } + modifiedLocations |= layerLocation(layer) } } - if shouldSave { - return r.saveLayers() + if err := r.saveLayers(modifiedLocations); err != nil { + return false, err + } + if incompleteDeletionErrors != nil { + return false, incompleteDeletionErrors } } - - return err -} - -func (r *layerStore) LoadLocked() error { - r.lockfile.Lock() - defer r.lockfile.Unlock() - return r.Load() + return false, nil } +// The caller must hold r.mountsLockfile for reading or writing. +// The caller must hold r.inProcessLock for WRITING. func (r *layerStore) loadMounts() error { mounts := make(map[string]*Layer) mpath := r.mountspath() - data, err := ioutil.ReadFile(mpath) + data, err := os.ReadFile(mpath) if err != nil && !os.IsNotExist(err) { return err } @@ -458,42 +895,77 @@ func (r *layerStore) loadMounts() error { return err } -func (r *layerStore) Save() error { +// save saves the contents of the store to disk. +// The caller must hold r.lockfile locked for writing. +// The caller must hold r.inProcessLock for WRITING. +func (r *layerStore) save(saveLocations layerLocations) error { r.mountsLockfile.Lock() defer r.mountsLockfile.Unlock() - defer r.mountsLockfile.Touch() - if err := r.saveLayers(); err != nil { + if err := r.saveLayers(saveLocations); err != nil { return err } return r.saveMounts() } -func (r *layerStore) saveLayers() error { - if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the layer store at %q", r.layerspath()) - } - if !r.Locked() { - return errors.New("layer store is not locked for writing") +// saveFor saves the contents of the store relevant for modifiedLayer to disk. +// The caller must hold r.lockfile locked for writing. +// The caller must hold r.inProcessLock for WRITING. +func (r *layerStore) saveFor(modifiedLayer *Layer) error { + return r.save(layerLocation(modifiedLayer)) +} + +// The caller must hold r.lockfile locked for writing. +// The caller must hold r.inProcessLock for WRITING. 
+func (r *layerStore) saveLayers(saveLocations layerLocations) error { + if !r.lockfile.IsReadWrite() { + return fmt.Errorf("not allowed to modify the layer store at %q: %w", r.layerdir, ErrStoreIsReadOnly) } - rpath := r.layerspath() - if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil { - return err + r.lockfile.AssertLockedForWriting() + + for locationIndex := 0; locationIndex < numLayerLocationIndex; locationIndex++ { + location := layerLocationFromIndex(locationIndex) + if location&saveLocations == 0 { + continue + } + rpath := r.jsonPath[locationIndex] + if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil { + return err + } + subsetLayers := make([]*Layer, 0, len(r.layers)) + for _, layer := range r.layers { + if layerLocation(layer) == location { + subsetLayers = append(subsetLayers, layer) + } + } + + jldata, err := json.Marshal(&subsetLayers) + if err != nil { + return err + } + opts := ioutils.AtomicFileWriterOptions{} + if location == volatileLayerLocation { + opts.NoSync = true + } + if err := ioutils.AtomicWriteFileWithOpts(rpath, jldata, 0600, &opts); err != nil { + return err + } + r.layerspathsModified[locationIndex] = opts.ModTime } - jldata, err := json.Marshal(&r.layers) + lw, err := r.lockfile.RecordWrite() if err != nil { return err } - defer r.Touch() - return ioutils.AtomicWriteFile(rpath, jldata, 0600) + r.lastWrite = lw + return nil } +// The caller must hold r.mountsLockfile for writing. +// The caller must hold r.inProcessLock for WRITING. func (r *layerStore) saveMounts() error { - if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the layer store at %q", r.layerspath()) - } - if !r.mountsLockfile.Locked() { - return errors.New("layer store mount information is not locked for writing") + if !r.lockfile.IsReadWrite() { + return fmt.Errorf("not allowed to modify the layer store at %q: %w", r.layerdir, ErrStoreIsReadOnly) } + r.mountsLockfile.AssertLockedForWriting() mpath := r.mountspath() if err := os.MkdirAll(filepath.Dir(mpath), 0700); err != nil { return err @@ -515,63 +987,107 @@ func (r *layerStore) saveMounts() error { if err = ioutils.AtomicWriteFile(mpath, jmdata, 0600); err != nil { return err } + lw, err := r.mountsLockfile.RecordWrite() + if err != nil { + return err + } + r.mountsLastWrite = lw return r.loadMounts() } -func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Driver) (LayerStore, error) { +func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Driver, transient bool) (rwLayerStore, error) { if err := os.MkdirAll(rundir, 0700); err != nil { return nil, err } if err := os.MkdirAll(layerdir, 0700); err != nil { return nil, err } - lockfile, err := GetLockfile(filepath.Join(layerdir, "layers.lock")) + // Note: While the containers.lock file is in rundir for transient stores + // we don't want to do this here, because the non-transient layers in + // layers.json might be used externally as a read-only layer (using e.g. 
+ // additionalimagestores), and that would look for the lockfile in the + // same directory + lockFile, err := lockfile.GetLockFile(filepath.Join(layerdir, "layers.lock")) if err != nil { return nil, err } - mountsLockfile, err := GetLockfile(filepath.Join(rundir, "mountpoints.lock")) + mountsLockfile, err := lockfile.GetLockFile(filepath.Join(rundir, "mountpoints.lock")) if err != nil { return nil, err } + volatileDir := layerdir + if transient { + volatileDir = rundir + } rlstore := layerStore{ - lockfile: lockfile, + lockfile: lockFile, mountsLockfile: mountsLockfile, - driver: driver, rundir: rundir, - layerdir: layerdir, - byid: make(map[string]*Layer), - bymount: make(map[string]*Layer), - byname: make(map[string]*Layer), - uidMap: copyIDMap(s.uidMap), - gidMap: copyIDMap(s.gidMap), - } - if err := rlstore.Load(); err != nil { + jsonPath: [numLayerLocationIndex]string{ + filepath.Join(layerdir, "layers.json"), + filepath.Join(volatileDir, "volatile-layers.json"), + }, + layerdir: layerdir, + + byid: make(map[string]*Layer), + byname: make(map[string]*Layer), + bymount: make(map[string]*Layer), + + driver: driver, + } + if err := rlstore.startWritingWithReload(false); err != nil { + return nil, err + } + defer rlstore.stopWriting() + lw, err := rlstore.lockfile.GetLastWrite() + if err != nil { + return nil, err + } + rlstore.lastWrite = lw + // rlstore.mountsLastWrite is initialized inside rlstore.load(). + if _, err := rlstore.load(true); err != nil { return nil, err } return &rlstore, nil } -func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (ROLayerStore, error) { - lockfile, err := GetROLockfile(filepath.Join(layerdir, "layers.lock")) +func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (roLayerStore, error) { + lockfile, err := lockfile.GetROLockFile(filepath.Join(layerdir, "layers.lock")) if err != nil { return nil, err } rlstore := layerStore{ lockfile: lockfile, mountsLockfile: nil, - driver: driver, rundir: rundir, - layerdir: layerdir, - byid: make(map[string]*Layer), - bymount: make(map[string]*Layer), - byname: make(map[string]*Layer), + jsonPath: [numLayerLocationIndex]string{ + filepath.Join(layerdir, "layers.json"), + filepath.Join(layerdir, "volatile-layers.json"), + }, + layerdir: layerdir, + + byid: make(map[string]*Layer), + byname: make(map[string]*Layer), + bymount: make(map[string]*Layer), + + driver: driver, + } + if err := rlstore.startReadingWithReload(false); err != nil { + return nil, err + } + defer rlstore.stopReading() + lw, err := rlstore.lockfile.GetLastWrite() + if err != nil { + return nil, err } - if err := rlstore.Load(); err != nil { + rlstore.lastWrite = lw + if _, err := rlstore.load(false); err != nil { return nil, err } return &rlstore, nil } +// Requires startReading or startWriting. func (r *layerStore) lookup(id string) (*Layer, bool) { if layer, ok := r.byid[id]; ok { return layer, ok @@ -584,6 +1100,7 @@ func (r *layerStore) lookup(id string) (*Layer, bool) { return nil, false } +// Requires startReading or startWriting. func (r *layerStore) Size(name string) (int64, error) { layer, ok := r.lookup(name) if !ok { @@ -598,21 +1115,23 @@ func (r *layerStore) Size(name string) (int64, error) { return -1, nil } +// Requires startWriting. 
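
// NOTE (illustration only, not part of the upstream change): the two constructors above
// split layer metadata across two JSON files, and only the volatile one moves to rundir
// for transient stores. With illustrative (not mandated) directories:
//
//	layerdir = /var/lib/containers/storage/overlay-layers
//	rundir   = /run/containers/storage/overlay-layers
//
//	transient == false:  layers.json and volatile-layers.json both live in layerdir
//	transient == true:   layers.json stays in layerdir;
//	                     volatile-layers.json lives in rundir and is lost on reboot
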
func (r *layerStore) ClearFlag(id string, flag string) error { - if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to clear flags on layers at %q", r.layerspath()) + if !r.lockfile.IsReadWrite() { + return fmt.Errorf("not allowed to clear flags on layers at %q: %w", r.layerdir, ErrStoreIsReadOnly) } layer, ok := r.lookup(id) if !ok { return ErrLayerUnknown } delete(layer.Flags, flag) - return r.Save() + return r.saveFor(layer) } +// Requires startWriting. func (r *layerStore) SetFlag(id string, flag string, value interface{}) error { - if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to set flags on layers at %q", r.layerspath()) + if !r.lockfile.IsReadWrite() { + return fmt.Errorf("not allowed to set flags on layers at %q: %w", r.layerdir, ErrStoreIsReadOnly) } layer, ok := r.lookup(id) if !ok { @@ -622,13 +1141,14 @@ func (r *layerStore) SetFlag(id string, flag string, value interface{}) error { layer.Flags = make(map[string]interface{}) } layer.Flags[flag] = value - return r.Save() + return r.saveFor(layer) } func (r *layerStore) Status() ([][2]string, error) { return r.driver.Status(), nil } +// Requires startWriting. func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []string, aLayer drivers.AdditionalLayer) (layer *Layer, err error) { if duplicateLayer, idInUse := r.byid[id]; idInUse { return duplicateLayer, ErrDuplicateID @@ -663,7 +1183,9 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s // TODO: check if necessary fields are filled r.layers = append(r.layers, layer) - r.idindex.Add(id) + // This can only fail on duplicate IDs, which shouldn’t happen — and in that case the index is already in the desired state anyway. + // Implementing recovery from an unlikely and unimportant failure here would be too risky. + _ = r.idindex.Add(id) r.byid[id] = layer for _, name := range names { // names got from the additional layer store won't be used r.byname[name] = layer @@ -672,20 +1194,22 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s r.bycompressedsum[layer.CompressedDigest] = append(r.bycompressedsum[layer.CompressedDigest], layer.ID) } if layer.UncompressedDigest != "" { - r.byuncompressedsum[layer.CompressedDigest] = append(r.byuncompressedsum[layer.CompressedDigest], layer.ID) + r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID) } - if err := r.Save(); err != nil { - r.driver.Remove(id) + if err := r.saveFor(layer); err != nil { + if err2 := r.driver.Remove(id); err2 != nil { + logrus.Errorf("While recovering from a failure to save layers, error deleting layer %#v: %v", id, err2) + } return nil, err } return copyLayer(layer), nil } -func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (layer *Layer, size int64, err error) { - if !r.IsReadWrite() { - return nil, -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new layers at %q", r.layerspath()) +// Requires startWriting. 
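
// NOTE (illustration only, not part of the upstream change): SetFlag and ClearFlag above
// persist immediately via saveFor, so a crash between a set and the matching clear leaves
// the flag visible to the next process; the incompleteFlag handling elsewhere in this
// file relies on exactly that. A hypothetical caller ("example-flag" is an arbitrary
// flag name, and the caller is assumed to have called startWriting):
func setExampleFlag(r *layerStore, id string, on bool) error {
	if on {
		return r.SetFlag(id, "example-flag", true) // rewrites the layer’s JSON file before returning
	}
	return r.ClearFlag(id, "example-flag")
}
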
+func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error) {
+	if !r.lockfile.IsReadWrite() {
+		return nil, -1, fmt.Errorf("not allowed to create new layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
 	}
-	size = -1
 	if err := os.MkdirAll(r.rundir, 0700); err != nil {
 		return nil, -1, err
 	}
@@ -714,12 +1238,32 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
 		parent = parentLayer.ID
 	}
 	var parentMappings, templateIDMappings, oldMappings *idtools.IDMappings
+	var (
+		templateMetadata           string
+		templateCompressedDigest   digest.Digest
+		templateCompressedSize     int64
+		templateUncompressedDigest digest.Digest
+		templateUncompressedSize   int64
+		templateCompressionType    archive.Compression
+		templateUIDs, templateGIDs []uint32
+		templateTSdata             []byte
+	)
 	if moreOptions.TemplateLayer != "" {
+		var tserr error
 		templateLayer, ok := r.lookup(moreOptions.TemplateLayer)
 		if !ok {
 			return nil, -1, ErrLayerUnknown
 		}
+		templateMetadata = templateLayer.Metadata
 		templateIDMappings = idtools.NewIDMappingsFromMaps(templateLayer.UIDMap, templateLayer.GIDMap)
+		templateCompressedDigest, templateCompressedSize = templateLayer.CompressedDigest, templateLayer.CompressedSize
+		templateUncompressedDigest, templateUncompressedSize = templateLayer.UncompressedDigest, templateLayer.UncompressedSize
+		templateCompressionType = templateLayer.CompressionType
+		templateUIDs, templateGIDs = append([]uint32{}, templateLayer.UIDs...), append([]uint32{}, templateLayer.GIDs...)
+		templateTSdata, tserr = os.ReadFile(r.tspath(templateLayer.ID))
+		if tserr != nil && !os.IsNotExist(tserr) {
+			return nil, -1, tserr
+		}
 	} else {
 		templateIDMappings = &idtools.IDMappings{}
 	}
@@ -729,8 +1273,65 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
 		parentMappings = &idtools.IDMappings{}
 	}
 	if mountLabel != "" {
-		label.ReserveLabel(mountLabel)
+		selinux.ReserveLabel(mountLabel)
+	}
+
+	// Before actually creating the layer, make a persistent record of it with incompleteFlag,
+	// so that future processes have a chance to delete it.
+	layer := &Layer{
+		ID:                 id,
+		Parent:             parent,
+		Names:              names,
+		MountLabel:         mountLabel,
+		Metadata:           templateMetadata,
+		Created:            time.Now().UTC(),
+		CompressedDigest:   templateCompressedDigest,
+		CompressedSize:     templateCompressedSize,
+		UncompressedDigest: templateUncompressedDigest,
+		UncompressedSize:   templateUncompressedSize,
+		CompressionType:    templateCompressionType,
+		UIDs:               templateUIDs,
+		GIDs:               templateGIDs,
+		Flags:              make(map[string]interface{}),
+		UIDMap:             copyIDMap(moreOptions.UIDMap),
+		GIDMap:             copyIDMap(moreOptions.GIDMap),
+		BigDataNames:       []string{},
+		volatileStore:      moreOptions.Volatile,
+	}
+	r.layers = append(r.layers, layer)
+	// This can only fail on duplicate IDs, which shouldn’t happen — and in that case the index is already in the desired state anyway.
+	// Implementing recovery from an unlikely and unimportant failure here would be too risky.
+	_ = r.idindex.Add(id)
+	r.byid[id] = layer
+	for _, name := range names {
+		r.byname[name] = layer
+	}
+	for flag, value := range flags {
+		layer.Flags[flag] = value
 	}
+	layer.Flags[incompleteFlag] = true
+
+	succeeded := false
+	cleanupFailureContext := ""
+	defer func() {
+		if !succeeded {
+			// On any error, try both removing the driver's data as well
+			// as the in-memory layer record.
+ if err2 := r.Delete(layer.ID); err2 != nil { + if cleanupFailureContext == "" { + cleanupFailureContext = "unknown: cleanupFailureContext not set at the failure site" + } + logrus.Errorf("While recovering from a failure (%s), error deleting layer %#v: %v", cleanupFailureContext, layer.ID, err2) + } + } + }() + + err := r.saveFor(layer) + if err != nil { + cleanupFailureContext = "saving incomplete layer metadata" + return nil, -1, err + } + idMappings := idtools.NewIDMappingsFromMaps(moreOptions.UIDMap, moreOptions.GIDMap) opts := drivers.CreateOpts{ MountLabel: mountLabel, @@ -738,120 +1339,104 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab IDMappings: idMappings, } if moreOptions.TemplateLayer != "" { - if err = r.driver.CreateFromTemplate(id, moreOptions.TemplateLayer, templateIDMappings, parent, parentMappings, &opts, writeable); err != nil { - if id != "" { - return nil, -1, errors.Wrapf(err, "error creating copy of template layer %q with ID %q", moreOptions.TemplateLayer, id) - } - return nil, -1, errors.Wrapf(err, "error creating copy of template layer %q", moreOptions.TemplateLayer) + if err := r.driver.CreateFromTemplate(id, moreOptions.TemplateLayer, templateIDMappings, parent, parentMappings, &opts, writeable); err != nil { + cleanupFailureContext = "creating a layer from template" + return nil, -1, fmt.Errorf("creating copy of template layer %q with ID %q: %w", moreOptions.TemplateLayer, id, err) } oldMappings = templateIDMappings } else { if writeable { - if err = r.driver.CreateReadWrite(id, parent, &opts); err != nil { - if id != "" { - return nil, -1, errors.Wrapf(err, "error creating read-write layer with ID %q", id) - } - return nil, -1, errors.Wrapf(err, "error creating read-write layer") + if err := r.driver.CreateReadWrite(id, parent, &opts); err != nil { + cleanupFailureContext = "creating a read-write layer" + return nil, -1, fmt.Errorf("creating read-write layer with ID %q: %w", id, err) } } else { - if err = r.driver.Create(id, parent, &opts); err != nil { - if id != "" { - return nil, -1, errors.Wrapf(err, "error creating layer with ID %q", id) - } - return nil, -1, errors.Wrapf(err, "error creating layer") + if err := r.driver.Create(id, parent, &opts); err != nil { + cleanupFailureContext = "creating a read-only layer" + return nil, -1, fmt.Errorf("creating layer with ID %q: %w", id, err) } } oldMappings = parentMappings } if !reflect.DeepEqual(oldMappings.UIDs(), idMappings.UIDs()) || !reflect.DeepEqual(oldMappings.GIDs(), idMappings.GIDs()) { - if err = r.driver.UpdateLayerIDMap(id, oldMappings, idMappings, mountLabel); err != nil { - // We don't have a record of this layer, but at least - // try to clean it up underneath us. 
- r.driver.Remove(id) + if err := r.driver.UpdateLayerIDMap(id, oldMappings, idMappings, mountLabel); err != nil { + cleanupFailureContext = "in UpdateLayerIDMap" return nil, -1, err } } - if err == nil { - layer = &Layer{ - ID: id, - Parent: parent, - Names: names, - MountLabel: mountLabel, - Created: time.Now().UTC(), - Flags: make(map[string]interface{}), - UIDMap: copyIDMap(moreOptions.UIDMap), - GIDMap: copyIDMap(moreOptions.GIDMap), - BigDataNames: []string{}, - } - r.layers = append(r.layers, layer) - r.idindex.Add(id) - r.byid[id] = layer - for _, name := range names { - r.byname[name] = layer - } - for flag, value := range flags { - layer.Flags[flag] = value - } - if diff != nil { - layer.Flags[incompleteFlag] = true - err = r.Save() - if err != nil { - // We don't have a record of this layer, but at least - // try to clean it up underneath us. - r.driver.Remove(id) - return nil, -1, err - } - size, err = r.ApplyDiff(layer.ID, diff) - if err != nil { - if r.Delete(layer.ID) != nil { - // Either a driver error or an error saving. - // We now have a layer that's been marked for - // deletion but which we failed to remove. - } - return nil, -1, err - } - delete(layer.Flags, incompleteFlag) + if len(templateTSdata) > 0 { + if err := os.MkdirAll(filepath.Dir(r.tspath(id)), 0o700); err != nil { + cleanupFailureContext = "creating tar-split parent directory for a copy from template" + return nil, -1, err } - err = r.Save() + if err := ioutils.AtomicWriteFile(r.tspath(id), templateTSdata, 0o600); err != nil { + cleanupFailureContext = "creating a tar-split copy from template" + return nil, -1, err + } + } + + var size int64 = -1 + if diff != nil { + size, err = r.applyDiffWithOptions(layer.ID, moreOptions, diff) if err != nil { - // We don't have a record of this layer, but at least - // try to clean it up underneath us. - r.driver.Remove(id) + cleanupFailureContext = "applying layer diff" return nil, -1, err } - layer = copyLayer(layer) + } else { + // applyDiffWithOptions in the `diff != nil` case handles this bit for us + if layer.CompressedDigest != "" { + r.bycompressedsum[layer.CompressedDigest] = append(r.bycompressedsum[layer.CompressedDigest], layer.ID) + } + if layer.UncompressedDigest != "" { + r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID) + } } + delete(layer.Flags, incompleteFlag) + err = r.saveFor(layer) + if err != nil { + cleanupFailureContext = "saving finished layer metadata" + return nil, -1, err + } + + layer = copyLayer(layer) + succeeded = true return layer, size, err } +// Requires startWriting. func (r *layerStore) CreateWithFlags(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}) (layer *Layer, err error) { layer, _, err = r.Put(id, parent, names, mountLabel, options, moreOptions, writeable, flags, nil) return layer, err } +// Requires startWriting. func (r *layerStore) Create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool) (layer *Layer, err error) { return r.CreateWithFlags(id, parent, names, mountLabel, options, moreOptions, writeable, nil) } +// Requires startReading or startWriting. 
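
// NOTE (illustration only, not part of the upstream change): a hypothetical caller of
// the Create wrapper above. The arguments mirror the real signature; the layer name is
// made up, the empty id is assumed to make Put generate one (that branch of Put is
// outside this hunk), and the caller must have called startWriting.
func createEmptyChildLayer(r *layerStore, parent *Layer) (*Layer, error) {
	return r.Create(
		"",                        // id: assumed to be auto-generated when empty
		parent,                    // parent layer record, or nil for a base layer
		[]string{"example-layer"}, // names
		"",                        // mountLabel
		nil,                       // options
		&LayerOptions{},           // moreOptions must be non-nil: Put dereferences it
		true,                      // writeable
	)
}
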
func (r *layerStore) Mounted(id string) (int, error) { - if !r.IsReadWrite() { - return 0, errors.Wrapf(ErrStoreIsReadOnly, "no mount information for layers at %q", r.mountspath()) - } - r.mountsLockfile.RLock() - defer r.mountsLockfile.Unlock() - if modified, err := r.mountsLockfile.Modified(); modified || err != nil { - if err = r.loadMounts(); err != nil { - return 0, err - } + if !r.lockfile.IsReadWrite() { + return 0, fmt.Errorf("no mount information for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly) } layer, ok := r.lookup(id) if !ok { return 0, ErrLayerUnknown } + // NOTE: The caller of this function is not holding (currently cannot hold) r.mountsLockfile, + // so the data is necessarily obsolete by the time this function returns. So, we don’t even + // try to reload it in this function, we just rely on r.load() that happened during + // r.startReading() or r.startWriting(). return layer.MountCount, nil } +// Requires startWriting. func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error) { + // LOCKING BUG: This is reachable via store.Diff → layerStore.Diff → layerStore.newFileGetter + // (with btrfs and zfs graph drivers) holding layerStore only locked for reading, while it modifies + // - r.layers[].MountCount (directly and via loadMounts / saveMounts) + // - r.layers[].MountPoint (directly and via loadMounts / saveMounts) + // - r.bymount (via loadMounts / saveMounts) // check whether options include ro option hasReadOnlyOpt := func(opts []string) bool { @@ -865,17 +1450,14 @@ func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error) // You are not allowed to mount layers from readonly stores if they // are not mounted read/only. - if !r.IsReadWrite() && !hasReadOnlyOpt(options.Options) { - return "", errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath()) + if !r.lockfile.IsReadWrite() && !hasReadOnlyOpt(options.Options) { + return "", fmt.Errorf("not allowed to update mount locations for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly) } r.mountsLockfile.Lock() defer r.mountsLockfile.Unlock() - if modified, err := r.mountsLockfile.Modified(); modified || err != nil { - if err = r.loadMounts(); err != nil { - return "", err - } + if err := r.reloadMountsIfChanged(); err != nil { + return "", err } - defer r.mountsLockfile.Touch() layer, ok := r.lookup(id) if !ok { return "", ErrLayerUnknown @@ -915,18 +1497,22 @@ func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error) return mountpoint, err } -func (r *layerStore) Unmount(id string, force bool) (bool, error) { - if !r.IsReadWrite() { - return false, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath()) +// Requires startWriting. 
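
// NOTE (illustration only, not part of the upstream change): Mount and unmount maintain
// a per-layer reference count rather than a boolean, which is what Mounted above
// reports. A stripped-down model of that bookkeeping (the real code also persists it
// via saveMounts and delegates the actual work to the graph driver):
type refcountedMount struct {
	count      int
	mountpoint string
}

func (m *refcountedMount) mount(driverMount func() (string, error)) (string, error) {
	if m.count > 0 {
		m.count++
		return m.mountpoint, nil // already mounted; just bump the count
	}
	mp, err := driverMount()
	if err != nil {
		return "", err
	}
	m.count, m.mountpoint = 1, mp
	return mp, nil
}

func (m *refcountedMount) unmount(driverUnmount func() error) error {
	if m.count > 1 {
		m.count-- // still referenced elsewhere; keep the mount
		return nil
	}
	if err := driverUnmount(); err != nil {
		return err
	}
	m.count, m.mountpoint = 0, ""
	return nil
}
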
+func (r *layerStore) unmount(id string, force bool, conditional bool) (bool, error) { + // LOCKING BUG: This is reachable via store.Diff → layerStore.Diff → layerStore.newFileGetter → simpleGetCloser.Close() + // (with btrfs and zfs graph drivers) holding layerStore only locked for reading, while it modifies + // - r.layers[].MountCount (directly and via loadMounts / saveMounts) + // - r.layers[].MountPoint (directly and via loadMounts / saveMounts) + // - r.bymount (via loadMounts / saveMounts) + + if !r.lockfile.IsReadWrite() { + return false, fmt.Errorf("not allowed to update mount locations for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly) } r.mountsLockfile.Lock() defer r.mountsLockfile.Unlock() - if modified, err := r.mountsLockfile.Modified(); modified || err != nil { - if err = r.loadMounts(); err != nil { - return false, err - } + if err := r.reloadMountsIfChanged(); err != nil { + return false, err } - defer r.mountsLockfile.Touch() layer, ok := r.lookup(id) if !ok { layerByMount, ok := r.bymount[filepath.Clean(id)] @@ -935,6 +1521,9 @@ func (r *layerStore) Unmount(id string, force bool) (bool, error) { } layer = layerByMount } + if conditional && layer.MountCount == 0 { + return false, ErrLayerNotMounted + } if force { layer.MountCount = 1 } @@ -954,17 +1543,16 @@ func (r *layerStore) Unmount(id string, force bool) (bool, error) { return true, err } +// Requires startReading or startWriting. func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) { - if !r.IsReadWrite() { - return nil, nil, errors.Wrapf(ErrStoreIsReadOnly, "no mount information for layers at %q", r.mountspath()) + if !r.lockfile.IsReadWrite() { + return nil, nil, fmt.Errorf("no mount information for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly) } r.mountsLockfile.RLock() defer r.mountsLockfile.Unlock() - if modified, err := r.mountsLockfile.Modified(); modified || err != nil { - if err = r.loadMounts(); err != nil { - return nil, nil, err - } - } + // We are not checking r.mountsLockfile.Modified() and calling r.loadMounts here because the store + // is only locked for reading = we are not allowed to modify layer data. + // Holding r.mountsLockfile protects us against concurrent mount/unmount operations. layer, ok := r.lookup(id) if !ok { return nil, nil, ErrLayerUnknown @@ -977,9 +1565,16 @@ func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) { // We don't know which directories to examine. return nil, nil, ErrLayerNotMounted } + // Holding r.mountsLockfile protects us against concurrent mount/unmount operations, but we didn’t + // hold it continuously since the time we loaded the mount data; so it’s possible the layer + // was unmounted in the meantime, or mounted elsewhere. Treat that as if we were run after the unmount, + // = a missing mount, not a filesystem error. 
+ if _, err := system.Lstat(layer.MountPoint); errors.Is(err, os.ErrNotExist) { + return nil, nil, ErrLayerNotMounted + } rootuid, rootgid, err := idtools.GetRootUIDGID(layer.UIDMap, layer.GIDMap) if err != nil { - return nil, nil, errors.Wrapf(err, "error reading root ID values for layer %q", layer.ID) + return nil, nil, fmt.Errorf("reading root ID values for layer %q: %w", layer.ID, err) } m := idtools.NewIDMappingsFromMaps(layer.UIDMap, layer.GIDMap) fsuids := make(map[int]struct{}) @@ -987,7 +1582,7 @@ func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) { for dir := filepath.Dir(layer.MountPoint); dir != "" && dir != string(os.PathSeparator); dir = filepath.Dir(dir) { st, err := system.Stat(dir) if err != nil { - return nil, nil, errors.Wrap(err, "read directory ownership") + return nil, nil, fmt.Errorf("read directory ownership: %w", err) } lst, err := system.Lstat(dir) if err != nil { @@ -1025,29 +1620,36 @@ func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) { return uids, gids, nil } +// Requires startWriting. func (r *layerStore) removeName(layer *Layer, name string) { layer.Names = stringSliceWithoutValue(layer.Names, name) } -func (r *layerStore) SetNames(id string, names []string) error { - if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change layer name assignments at %q", r.layerspath()) +// Requires startWriting. +func (r *layerStore) updateNames(id string, names []string, op updateNameOperation) error { + if !r.lockfile.IsReadWrite() { + return fmt.Errorf("not allowed to change layer name assignments at %q: %w", r.layerdir, ErrStoreIsReadOnly) } - names = dedupeNames(names) - if layer, ok := r.lookup(id); ok { - for _, name := range layer.Names { - delete(r.byname, name) - } - for _, name := range names { - if otherLayer, ok := r.byname[name]; ok { - r.removeName(otherLayer, name) - } - r.byname[name] = layer + layer, ok := r.lookup(id) + if !ok { + return ErrLayerUnknown + } + oldNames := layer.Names + names, err := applyNameOperation(oldNames, names, op) + if err != nil { + return err + } + for _, name := range oldNames { + delete(r.byname, name) + } + for _, name := range names { + if otherLayer, ok := r.byname[name]; ok { + r.removeName(otherLayer, name) } - layer.Names = names - return r.Save() + r.byname[name] = layer } - return ErrLayerUnknown + layer.Names = names + return r.saveFor(layer) } func (r *layerStore) datadir(id string) string { @@ -1058,27 +1660,29 @@ func (r *layerStore) datapath(id, key string) string { return filepath.Join(r.datadir(id), makeBigDataBaseName(key)) } +// Requires startReading or startWriting. func (r *layerStore) BigData(id, key string) (io.ReadCloser, error) { if key == "" { - return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve layer big data value for empty name") + return nil, fmt.Errorf("can't retrieve layer big data value for empty name: %w", ErrInvalidBigDataName) } layer, ok := r.lookup(id) if !ok { - return nil, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", id) + return nil, fmt.Errorf("locating layer with ID %q: %w", id, ErrLayerUnknown) } return os.Open(r.datapath(layer.ID, key)) } +// Requires startWriting. 
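
// NOTE (illustration only, not part of the upstream change): big-data items are stored
// as one file per key under a per-layer directory, which is what datadir/datapath above
// encode. For a layer <id> and an illustrative key "manifest", the item is expected at
//
//	<layerdir>/<id>/manifest
//
// with makeBigDataBaseName (defined outside this hunk) sanitizing the key into a
// file name.
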
func (r *layerStore) SetBigData(id, key string, data io.Reader) error { if key == "" { - return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for layer big data item") + return fmt.Errorf("can't set empty name for layer big data item: %w", ErrInvalidBigDataName) } - if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to save data items associated with layers at %q", r.layerspath()) + if !r.lockfile.IsReadWrite() { + return fmt.Errorf("not allowed to save data items associated with layers at %q: %w", r.layerdir, ErrStoreIsReadOnly) } layer, ok := r.lookup(id) if !ok { - return errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q to write bigdata", id) + return fmt.Errorf("locating layer with ID %q to write bigdata: %w", id, ErrLayerUnknown) } err := os.MkdirAll(r.datadir(layer.ID), 0700) if err != nil { @@ -1090,16 +1694,16 @@ func (r *layerStore) SetBigData(id, key string, data io.Reader) error { // so that it is either accessing the old data or the new one. writer, err := ioutils.NewAtomicFileWriter(r.datapath(layer.ID, key), 0600) if err != nil { - return errors.Wrapf(err, "error opening bigdata file") + return fmt.Errorf("opening bigdata file: %w", err) } if _, err := io.Copy(writer, data); err != nil { writer.Close() - return errors.Wrapf(err, "error copying bigdata for the layer") + return fmt.Errorf("copying bigdata for the layer: %w", err) } if err := writer.Close(); err != nil { - return errors.Wrapf(err, "error closing bigdata file for the layer") + return fmt.Errorf("closing bigdata file for the layer: %w", err) } addName := true @@ -1111,19 +1715,21 @@ func (r *layerStore) SetBigData(id, key string, data io.Reader) error { } if addName { layer.BigDataNames = append(layer.BigDataNames, key) - return r.Save() + return r.saveFor(layer) } return nil } +// Requires startReading or startWriting. func (r *layerStore) BigDataNames(id string) ([]string, error) { layer, ok := r.lookup(id) if !ok { - return nil, errors.Wrapf(ErrImageUnknown, "error locating layer with ID %q to retrieve bigdata names", id) + return nil, fmt.Errorf("locating layer with ID %q to retrieve bigdata names: %w", id, ErrImageUnknown) } return copyStringSlice(layer.BigDataNames), nil } +// Requires startReading or startWriting. func (r *layerStore) Metadata(id string) (string, error) { if layer, ok := r.lookup(id); ok { return layer.Metadata, nil @@ -1131,13 +1737,14 @@ func (r *layerStore) Metadata(id string) (string, error) { return "", ErrLayerUnknown } +// Requires startWriting. func (r *layerStore) SetMetadata(id, metadata string) error { - if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify layer metadata at %q", r.layerspath()) + if !r.lockfile.IsReadWrite() { + return fmt.Errorf("not allowed to modify layer metadata at %q: %w", r.layerdir, ErrStoreIsReadOnly) } if layer, ok := r.lookup(id); ok { layer.Metadata = metadata - return r.Save() + return r.saveFor(layer) } return ErrLayerUnknown } @@ -1146,17 +1753,41 @@ func (r *layerStore) tspath(id string) string { return filepath.Join(r.layerdir, id+tarSplitSuffix) } +// layerHasIncompleteFlag returns true if layer.Flags contains an incompleteFlag set to true +// The caller must hold r.inProcessLock for reading. 
+func layerHasIncompleteFlag(layer *Layer) bool { + // layer.Flags[…] is defined to succeed and return ok == false if Flags == nil + if flagValue, ok := layer.Flags[incompleteFlag]; ok { + if b, ok := flagValue.(bool); ok && b { + return true + } + } + return false +} + +// Requires startWriting. func (r *layerStore) deleteInternal(id string) error { - if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath()) + if !r.lockfile.IsReadWrite() { + return fmt.Errorf("not allowed to delete layers at %q: %w", r.layerdir, ErrStoreIsReadOnly) } layer, ok := r.lookup(id) if !ok { return ErrLayerUnknown } + // Ensure that if we are interrupted, the layer will be cleaned up. + if !layerHasIncompleteFlag(layer) { + if layer.Flags == nil { + layer.Flags = make(map[string]interface{}) + } + layer.Flags[incompleteFlag] = true + if err := r.saveFor(layer); err != nil { + return err + } + } + // We never unset incompleteFlag; below, we remove the entire object from r.layers. + id = layer.ID - err := r.driver.Remove(id) - if err != nil { + if err := r.driver.Remove(id); err != nil { return err } @@ -1166,7 +1797,9 @@ func (r *layerStore) deleteInternal(id string) error { for _, name := range layer.Names { delete(r.byname, name) } - r.idindex.Delete(id) + // This can only fail if the ID is already missing, which shouldn’t happen — and in that case the index is already in the desired state anyway. + // The store’s Delete method is used on various paths to recover from failures, so this should be robust against partially missing data. + _ = r.idindex.Delete(id) mountLabel := layer.MountLabel if layer.MountPoint != "" { delete(r.bymount, layer.MountPoint) @@ -1196,13 +1829,14 @@ func (r *layerStore) deleteInternal(id string) error { } } if !found { - label.ReleaseLabel(mountLabel) + selinux.ReleaseLabel(mountLabel) } } return nil } +// Requires startWriting. func (r *layerStore) deleteInDigestMap(id string) { for digest, layers := range r.bycompressedsum { for i, layerID := range layers { @@ -1224,6 +1858,7 @@ func (r *layerStore) deleteInDigestMap(id string) { } } +// Requires startWriting. func (r *layerStore) Delete(id string) error { layer, ok := r.lookup(id) if !ok { @@ -1233,37 +1868,28 @@ func (r *layerStore) Delete(id string) error { // The layer may already have been explicitly unmounted, but if not, we // should try to clean that up before we start deleting anything at the // driver level. - mountCount, err := r.Mounted(id) - if err != nil { - return errors.Wrapf(err, "error checking if layer %q is still mounted", id) - } - for mountCount > 0 { - if _, err := r.Unmount(id, false); err != nil { - return err + for { + _, err := r.unmount(id, false, true) + if err == ErrLayerNotMounted { + break } - mountCount, err = r.Mounted(id) if err != nil { - return errors.Wrapf(err, "error checking if layer %q is still mounted", id) + return err } } if err := r.deleteInternal(id); err != nil { return err } - return r.Save() -} - -func (r *layerStore) Lookup(name string) (id string, err error) { - if layer, ok := r.lookup(name); ok { - return layer.ID, nil - } - return "", ErrLayerUnknown + return r.saveFor(layer) } +// Requires startReading or startWriting. func (r *layerStore) Exists(id string) bool { _, ok := r.lookup(id) return ok } +// Requires startReading or startWriting. 
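
// NOTE (illustration only, not part of the upstream change): deleteInternal above
// follows a write-ahead pattern: it persists incompleteFlag first and only then
// destroys data, so that a crash mid-deletion leaves a marker that a later load() can
// act on. The shape of the pattern, reduced to its essentials (all three callbacks
// are hypothetical):
func deleteWithTombstone(markIncomplete, destroyData, dropRecord func() error) error {
	if err := markIncomplete(); err != nil { // persisted first: recoverable from here on
		return err
	}
	if err := destroyData(); err != nil { // e.g. graph-driver Remove
		return err // the marker stays behind; a later load() retries the cleanup
	}
	return dropRecord() // finally forget the layer entirely
}
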
 func (r *layerStore) Get(id string) (*Layer, error) {
 	if layer, ok := r.lookup(id); ok {
 		return copyLayer(layer), nil
@@ -1271,14 +1897,18 @@ func (r *layerStore) Get(id string) (*Layer, error) {
 	return nil, ErrLayerUnknown
 }
 
+// Requires startWriting.
 func (r *layerStore) Wipe() error {
-	if !r.IsReadWrite() {
-		return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath())
+	if !r.lockfile.IsReadWrite() {
+		return fmt.Errorf("not allowed to delete layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
 	}
 	ids := make([]string, 0, len(r.byid))
 	for id := range r.byid {
 		ids = append(ids, id)
 	}
+	sort.Slice(ids, func(i, j int) bool {
+		return r.byid[ids[i]].Created.After(r.byid[ids[j]].Created)
+	})
 	for _, id := range ids {
 		if err := r.Delete(id); err != nil {
 			return err
@@ -1287,6 +1917,7 @@ func (r *layerStore) Wipe() error {
 	return nil
 }
 
+// Requires startReading or startWriting.
 func (r *layerStore) findParentAndLayer(from, to string) (fromID string, toID string, fromLayer, toLayer *Layer, err error) {
 	var ok bool
 	toLayer, ok = r.lookup(to)
@@ -1311,6 +1942,7 @@ func (r *layerStore) findParentAndLayer(from, to string) (fromID string, toID st
 	return from, to, fromLayer, toLayer, nil
 }
 
+// The caller must hold r.inProcessLock for reading.
 func (r *layerStore) layerMappings(layer *Layer) *idtools.IDMappings {
 	if layer == nil {
 		return &idtools.IDMappings{}
@@ -1318,6 +1950,7 @@ func (r *layerStore) layerMappings(layer *Layer) *idtools.IDMappings {
 	return idtools.NewIDMappingsFromMaps(layer.UIDMap, layer.GIDMap)
 }
 
+// Requires startReading or startWriting.
 func (r *layerStore) Changes(from, to string) ([]archive.Change, error) {
 	from, to, fromLayer, toLayer, err := r.findParentAndLayer(from, to)
 	if err != nil {
@@ -1336,11 +1969,13 @@ func (s *simpleGetCloser) Get(path string) (io.ReadCloser, error) {
 	return os.Open(filepath.Join(s.path, path))
 }
 
+// LOCKING BUG: See the comments in layerStore.Diff
 func (s *simpleGetCloser) Close() error {
-	_, err := s.r.Unmount(s.id, false)
+	_, err := s.r.unmount(s.id, false, false)
 	return err
 }
 
+// LOCKING BUG: See the comments in layerStore.Diff
 func (r *layerStore) newFileGetter(id string) (drivers.FileGetCloser, error) {
 	if getter, ok := r.driver.(drivers.DiffGetterDriver); ok {
 		return getter.DiffGetter(id)
@@ -1356,6 +1991,25 @@ func (r *layerStore) newFileGetter(id string) (drivers.FileGetCloser, error) {
 	}, nil
 }
 
+// writeCompressedData copies data from source to compressor, which is on top of pwriter.
+func writeCompressedData(compressor io.WriteCloser, source io.ReadCloser) error {
+	defer compressor.Close()
+	defer source.Close()
+	_, err := io.Copy(compressor, source)
+	return err
+}
+
+// writeCompressedDataGoroutine copies data from source to compressor, which is on top of pwriter.
+// All errors must be reported by updating pwriter.
+func writeCompressedDataGoroutine(pwriter *io.PipeWriter, compressor io.WriteCloser, source io.ReadCloser) {
+	err := errors.New("internal error: unexpected panic in writeCompressedDataGoroutine")
+	defer func() { // Note that this is not the same as {defer pwriter.CloseWithError(err)}; we need err to be evaluated lazily.
+		_ = pwriter.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
+	}()
+	err = writeCompressedData(compressor, source)
+}
+
+// Requires startReading or startWriting.
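
// NOTE (illustration only, not part of the upstream change): the two helpers above
// exist so that the pipe is always closed with the goroutine’s final error;
// CloseWithError(nil) is equivalent to Close(), and the deferred closure reads err
// lazily, at defer-run time. The same pattern as a self-contained sketch
// (compressStream is hypothetical, and stdlib gzip stands in for the pgzip used here):
func compressStream(src io.ReadCloser) io.ReadCloser {
	pr, pw := io.Pipe()
	go func() {
		err := errors.New("internal error: unexpected panic in compressStream")
		defer func() { _ = pw.CloseWithError(err) }() // err is evaluated here, not above
		defer src.Close()
		gz := gzip.NewWriter(pw)
		if _, err = io.Copy(gz, src); err != nil {
			return
		}
		err = gz.Close() // flush the gzip trailer before closing the pipe
	}()
	return pr
}
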
 func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) {
 	var metadata storage.Unpacker
 
@@ -1387,12 +2041,7 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser,
 		preader.Close()
 		return nil, err
 	}
-	go func() {
-		defer pwriter.Close()
-		defer compressor.Close()
-		defer rc.Close()
-		io.Copy(compressor, rc)
-	}()
+	go writeCompressedDataGoroutine(pwriter, compressor, rc)
 	return preader, nil
 }
 
@@ -1406,7 +2055,7 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser,
 
 	if ad, ok := r.driver.(drivers.AdditionalLayerStoreDriver); ok {
 		if aLayer, err := ad.LookupAdditionalLayerByID(to); err == nil {
-			// This is an additional layer. We leverage blob API for aquiring the reproduced raw blob.
+			// This is an additional layer. We leverage blob API for acquiring the reproduced raw blob.
 			info, err := aLayer.Info()
 			if err != nil {
 				aLayer.Release()
@@ -1428,7 +2077,7 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser,
 			diff, err := archive.DecompressStream(blob)
 			if err != nil {
 				if err2 := blob.Close(); err2 != nil {
-					err = errors.Wrapf(err, "failed to close blob file: %v", err2)
+					err = fmt.Errorf("failed to close blob file: %v: %w", err2, err)
 				}
 				aLayer.Release()
 				return nil, err
@@ -1436,7 +2085,7 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser,
 			rc, err := maybeCompressReadCloser(diff)
 			if err != nil {
 				if err2 := closeAll(blob.Close, diff.Close); err2 != nil {
-					err = errors.Wrapf(err, "failed to cleanup: %v", err2)
+					err = fmt.Errorf("failed to cleanup: %v: %w", err2, err)
 				}
 				aLayer.Release()
 				return nil, err
@@ -1461,38 +2110,55 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser,
 		}
 		return maybeCompressReadCloser(diff)
 	}
-	defer tsfile.Close()
 
 	decompressor, err := pgzip.NewReader(tsfile)
 	if err != nil {
-		return nil, err
-	}
-	defer decompressor.Close()
-
-	tsbytes, err := ioutil.ReadAll(decompressor)
-	if err != nil {
+		if e := tsfile.Close(); e != nil {
+			logrus.Debug(e)
+		}
 		return nil, err
 	}
 
-	metadata = storage.NewJSONUnpacker(bytes.NewBuffer(tsbytes))
+	metadata = storage.NewJSONUnpacker(decompressor)
 
+	// LOCKING BUG: With btrfs and zfs graph drivers, this uses r.Mount() and r.unmount() holding layerStore only locked for reading
+	// but they modify in-memory state.
fgetter, err := r.newFileGetter(to) if err != nil { - return nil, err + errs := multierror.Append(nil, fmt.Errorf("creating file-getter: %w", err)) + if err := decompressor.Close(); err != nil { + errs = multierror.Append(errs, fmt.Errorf("closing decompressor: %w", err)) + } + if err := tsfile.Close(); err != nil { + errs = multierror.Append(errs, fmt.Errorf("closing tarstream headers: %w", err)) + } + return nil, errs.ErrorOrNil() } tarstream := asm.NewOutputTarStream(fgetter, metadata) rc := ioutils.NewReadCloserWrapper(tarstream, func() error { - err1 := tarstream.Close() - err2 := fgetter.Close() - if err2 == nil { - return err1 + var errs *multierror.Error + if err := decompressor.Close(); err != nil { + errs = multierror.Append(errs, fmt.Errorf("closing decompressor: %w", err)) + } + if err := tsfile.Close(); err != nil { + errs = multierror.Append(errs, fmt.Errorf("closing tarstream headers: %w", err)) + } + if err := tarstream.Close(); err != nil { + errs = multierror.Append(errs, fmt.Errorf("closing reconstructed tarstream: %w", err)) + } + if err := fgetter.Close(); err != nil { + errs = multierror.Append(errs, fmt.Errorf("closing file-getter: %w", err)) + } + if errs != nil { + return errs.ErrorOrNil() } - return err2 + return nil }) return maybeCompressReadCloser(rc) } +// Requires startReading or startWriting. func (r *layerStore) DiffSize(from, to string) (size int64, err error) { var fromLayer, toLayer *Layer from, to, fromLayer, toLayer, err = r.findParentAndLayer(from, to) @@ -1502,9 +2168,15 @@ func (r *layerStore) DiffSize(from, to string) (size int64, err error) { return r.driver.DiffSize(to, r.layerMappings(toLayer), from, r.layerMappings(fromLayer), toLayer.MountLabel) } +// Requires startWriting. func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error) { - if !r.IsReadWrite() { - return -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify layer contents at %q", r.layerspath()) + return r.applyDiffWithOptions(to, nil, diff) +} + +// Requires startWriting. 
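
// NOTE (illustration only, not part of the upstream change): the cleanup above
// aggregates every Close error with hashicorp/go-multierror instead of keeping only
// the first one. The same idea as a reusable helper (closeInOrder is hypothetical;
// compare the closeAll helper near the end of this file):
func closeInOrder(closers ...io.Closer) error {
	var errs *multierror.Error
	for _, c := range closers {
		if err := c.Close(); err != nil {
			errs = multierror.Append(errs, err)
		}
	}
	return errs.ErrorOrNil() // nil when every Close succeeded
}
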
+func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions, diff io.Reader) (size int64, err error) { + if !r.lockfile.IsReadWrite() { + return -1, fmt.Errorf("not allowed to modify layer contents at %q: %w", r.layerdir, ErrStoreIsReadOnly) } layer, ok := r.lookup(to) @@ -1517,25 +2189,48 @@ func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error if err != nil && err != io.EOF { return -1, err } - compression := archive.DetectCompression(header[:n]) - compressedDigest := digest.Canonical.Digester() - compressedCounter := ioutils.NewWriteCounter(compressedDigest.Hash()) - defragmented := io.TeeReader(io.MultiReader(bytes.NewBuffer(header[:n]), diff), compressedCounter) + defragmented := io.MultiReader(bytes.NewBuffer(header[:n]), diff) + + // Decide if we need to compute digests + var compressedDigest, uncompressedDigest digest.Digest // = "" + var compressedDigester, uncompressedDigester digest.Digester // = nil + if layerOptions != nil && layerOptions.OriginalDigest != "" && + layerOptions.OriginalDigest.Algorithm() == digest.Canonical { + compressedDigest = layerOptions.OriginalDigest + } else { + compressedDigester = digest.Canonical.Digester() + } + if layerOptions != nil && layerOptions.UncompressedDigest != "" && + layerOptions.UncompressedDigest.Algorithm() == digest.Canonical { + uncompressedDigest = layerOptions.UncompressedDigest + } else { + uncompressedDigester = digest.Canonical.Digester() + } + + var compressedWriter io.Writer + if compressedDigester != nil { + compressedWriter = compressedDigester.Hash() + } else { + compressedWriter = io.Discard + } + compressedCounter := ioutils.NewWriteCounter(compressedWriter) + defragmented = io.TeeReader(defragmented, compressedCounter) tsdata := bytes.Buffer{} compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed) if err != nil { compressor = pgzip.NewWriter(&tsdata) } + if err := compressor.SetConcurrency(1024*1024, 1); err != nil { // 1024*1024 is the hard-coded default; we're not changing that + logrus.Infof("setting compression concurrency threads to 1: %v; ignoring", err) + } metadata := storage.NewJSONPacker(compressor) uncompressed, err := archive.DecompressStream(defragmented) if err != nil { return -1, err } defer uncompressed.Close() - uncompressedDigest := digest.Canonical.Digester() - uncompressedCounter := ioutils.NewWriteCounter(uncompressedDigest.Hash()) uidLog := make(map[uint32]struct{}) gidLog := make(map[uint32]struct{}) idLogger, err := tarlog.NewLogger(func(h *tar.Header) { @@ -1548,7 +2243,12 @@ func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error return -1, err } defer idLogger.Close() - payload, err := asm.NewInputTarStream(io.TeeReader(uncompressed, io.MultiWriter(uncompressedCounter, idLogger)), metadata, storage.NewDiscardFilePutter()) + uncompressedCounter := ioutils.NewWriteCounter(idLogger) + uncompressedWriter := (io.Writer)(uncompressedCounter) + if uncompressedDigester != nil { + uncompressedWriter = io.MultiWriter(uncompressedWriter, uncompressedDigester.Hash()) + } + payload, err := asm.NewInputTarStream(io.TeeReader(uncompressed, uncompressedWriter), metadata, storage.NewDiscardFilePutter()) if err != nil { return -1, err } @@ -1562,13 +2262,17 @@ func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error return -1, err } compressor.Close() - if err == nil { - if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0700); err != nil { - return -1, err - } - if err := 
ioutils.AtomicWriteFile(r.tspath(layer.ID), tsdata.Bytes(), 0600); err != nil { - return -1, err - } + if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0700); err != nil { + return -1, err + } + if err := ioutils.AtomicWriteFile(r.tspath(layer.ID), tsdata.Bytes(), 0600); err != nil { + return -1, err + } + if compressedDigester != nil { + compressedDigest = compressedDigester.Digest() + } + if uncompressedDigester != nil { + uncompressedDigest = uncompressedDigester.Digest() } updateDigestMap := func(m *map[digest.Digest][]string, oldvalue, newvalue digest.Digest, id string) { @@ -1589,11 +2293,11 @@ func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error (*m)[newvalue] = append((*m)[newvalue], id) } } - updateDigestMap(&r.bycompressedsum, layer.CompressedDigest, compressedDigest.Digest(), layer.ID) - layer.CompressedDigest = compressedDigest.Digest() + updateDigestMap(&r.bycompressedsum, layer.CompressedDigest, compressedDigest, layer.ID) + layer.CompressedDigest = compressedDigest layer.CompressedSize = compressedCounter.Count - updateDigestMap(&r.byuncompressedsum, layer.UncompressedDigest, uncompressedDigest.Digest(), layer.ID) - layer.UncompressedDigest = uncompressedDigest.Digest() + updateDigestMap(&r.byuncompressedsum, layer.UncompressedDigest, uncompressedDigest, layer.ID) + layer.UncompressedDigest = uncompressedDigest layer.UncompressedSize = uncompressedCounter.Count layer.CompressionType = compression layer.UIDs = make([]uint32, 0, len(uidLog)) @@ -1611,11 +2315,12 @@ func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error return layer.GIDs[i] < layer.GIDs[j] }) - err = r.Save() + err = r.saveFor(layer) return size, err } +// Requires (startReading or?) startWriting. func (r *layerStore) DifferTarget(id string) (string, error) { ddriver, ok := r.driver.(drivers.DriverWithDiffer) if !ok { @@ -1628,6 +2333,7 @@ func (r *layerStore) DifferTarget(id string) (string, error) { return ddriver.DifferTarget(layer.ID) } +// Requires startWriting. func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error { ddriver, ok := r.driver.(drivers.DriverWithDiffer) if !ok { @@ -1652,18 +2358,21 @@ func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string, layer.UncompressedDigest = diffOutput.UncompressedDigest layer.UncompressedSize = diffOutput.Size layer.Metadata = diffOutput.Metadata - if err = r.Save(); err != nil { + if err = r.saveFor(layer); err != nil { return err } for k, v := range diffOutput.BigData { if err := r.SetBigData(id, k, bytes.NewReader(v)); err != nil { - r.Delete(id) + if err2 := r.Delete(id); err2 != nil { + logrus.Errorf("While recovering from a failure to set big data, error deleting layer %#v: %v", id, err2) + } return err } } return err } +// Requires startWriting. func (r *layerStore) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) { ddriver, ok := r.driver.(drivers.DriverWithDiffer) if !ok { @@ -1691,7 +2400,7 @@ func (r *layerStore) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOp } layer.UIDs = output.UIDs layer.GIDs = output.GIDs - err = r.Save() + err = r.saveFor(layer) return &output, err } @@ -1703,6 +2412,7 @@ func (r *layerStore) CleanupStagingDirectory(stagingDirectory string) error { return ddriver.CleanupStagingDirectory(stagingDirectory) } +// Requires startReading or startWriting. 
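The applyDiffWithOptions hunks above compute the compressed and uncompressed digests in a single pass, teeing the raw bytes into one digester before decompression and draining the decompressed side into another, and they skip a digester entirely when the caller supplied a trusted OriginalDigest or UncompressedDigest. A condensed sketch of the one-pass technique, substituting compress/gzip for the patch's archive.DecompressStream; digestBoth is an illustrative name:

package sketch

import (
	"compress/gzip"
	"io"

	"github.com/opencontainers/go-digest"
)

// digestBoth reads a gzip-compressed stream once and returns the digest of
// the compressed bytes as well as the digest of the decompressed payload.
func digestBoth(compressed io.Reader) (compressedDigest, uncompressedDigest digest.Digest, err error) {
	cd := digest.Canonical.Digester()
	ud := digest.Canonical.Digester()
	// Tee the raw bytes into the compressed digester while gzip consumes them.
	gz, err := gzip.NewReader(io.TeeReader(compressed, cd.Hash()))
	if err != nil {
		return "", "", err
	}
	defer gz.Close()
	// Draining the decompressed stream feeds the uncompressed digester.
	if _, err := io.Copy(ud.Hash(), gz); err != nil {
		return "", "", err
	}
	return cd.Digest(), ud.Digest(), nil
}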
func (r *layerStore) layersByDigestMap(m map[digest.Digest][]string, d digest.Digest) ([]Layer, error) { var layers []Layer for _, layerID := range m[d] { @@ -1715,82 +2425,28 @@ func (r *layerStore) layersByDigestMap(m map[digest.Digest][]string, d digest.Di return layers, nil } +// Requires startReading or startWriting. func (r *layerStore) LayersByCompressedDigest(d digest.Digest) ([]Layer, error) { return r.layersByDigestMap(r.bycompressedsum, d) } +// Requires startReading or startWriting. func (r *layerStore) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) { return r.layersByDigestMap(r.byuncompressedsum, d) } -func (r *layerStore) Lock() { - r.lockfile.Lock() -} - -func (r *layerStore) RecursiveLock() { - r.lockfile.RecursiveLock() -} - -func (r *layerStore) RLock() { - r.lockfile.RLock() -} - -func (r *layerStore) Unlock() { - r.lockfile.Unlock() -} - -func (r *layerStore) Touch() error { - return r.lockfile.Touch() -} - -func (r *layerStore) Modified() (bool, error) { - var mmodified bool - lmodified, err := r.lockfile.Modified() - if err != nil { - return lmodified, err - } - if r.IsReadWrite() { - r.mountsLockfile.RLock() - defer r.mountsLockfile.Unlock() - mmodified, err = r.mountsLockfile.Modified() - if err != nil { - return lmodified, err - } - } - return lmodified || mmodified, nil -} - -func (r *layerStore) IsReadWrite() bool { - return r.lockfile.IsReadWrite() -} - -func (r *layerStore) TouchedSince(when time.Time) bool { - return r.lockfile.TouchedSince(when) -} - -func (r *layerStore) Locked() bool { - return r.lockfile.Locked() -} - -func (r *layerStore) ReloadIfChanged() error { - r.loadMut.Lock() - defer r.loadMut.Unlock() - - modified, err := r.Modified() - if err == nil && modified { - return r.Load() - } - return err +func (r *layerStore) supportsShifting() bool { + return r.driver.SupportsShifting() } func closeAll(closes ...func() error) (rErr error) { for _, f := range closes { if err := f(); err != nil { if rErr == nil { - rErr = errors.Wrapf(err, "close error") + rErr = fmt.Errorf("close error: %w", err) continue } - rErr = errors.Wrapf(rErr, "%v", err) + rErr = fmt.Errorf("%v: %w", err, rErr) } } return diff --git a/vendor/github.com/containers/storage/lockfile_compat.go b/vendor/github.com/containers/storage/lockfile_compat.go index 6fac2ebac..640203881 100644 --- a/vendor/github.com/containers/storage/lockfile_compat.go +++ b/vendor/github.com/containers/storage/lockfile_compat.go @@ -4,12 +4,15 @@ import ( "github.com/containers/storage/pkg/lockfile" ) -type Locker = lockfile.Locker +// Deprecated: Use lockfile.*LockFile. +type Locker = lockfile.Locker //lint:ignore SA1019 // lockfile.Locker is deprecated +// Deprecated: Use lockfile.GetLockFile. func GetLockfile(path string) (lockfile.Locker, error) { return lockfile.GetLockfile(path) } +// Deprecated: Use lockfile.GetROLockFile. 
func GetROLockfile(path string) (lockfile.Locker, error) { return lockfile.GetROLockfile(path) } diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go index aa6689593..6209205b3 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive.go @@ -5,12 +5,14 @@ import ( "bufio" "bytes" "compress/bzip2" + "errors" "fmt" "io" - "io/ioutil" + "io/fs" "os" "path/filepath" "runtime" + "strconv" "strings" "sync" "syscall" @@ -22,8 +24,6 @@ import ( "github.com/containers/storage/pkg/system" "github.com/containers/storage/pkg/unshare" gzip "github.com/klauspost/pgzip" - rsystem "github.com/opencontainers/runc/libcontainer/system" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/ulikunitz/xz" ) @@ -71,12 +71,17 @@ type ( ) const ( - tarExt = "tar" - solaris = "solaris" - windows = "windows" - containersOverrideXattr = "user.containers.override_stat" + tarExt = "tar" + solaris = "solaris" + windows = "windows" + darwin = "darwin" + freebsd = "freebsd" ) +var xattrsToIgnore = map[string]interface{}{ + "security.selinux": true, +} + // Archiver allows the reuse of most utility functions of this package with a // pluggable Untar function. To facilitate the passing of specific id mappings // for untar, an archiver can be created with maps which will then be passed to @@ -214,7 +219,7 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) { case Zstd: return zstdReader(buf) default: - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension()) } } @@ -235,9 +240,9 @@ func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, er case Bzip2, Xz: // archive/bzip2 does not support writing, and there is no xz support at all // However, this is not a problem as docker only currently generates gzipped tars - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension()) default: - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension()) } } @@ -357,7 +362,7 @@ func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, erro hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) name, err = canonicalTarName(name, fi.IsDir()) if err != nil { - return nil, fmt.Errorf("tar: cannot canonicalize path: %v", err) + return nil, fmt.Errorf("tar: cannot canonicalize path: %w", err) } hdr.Name = name if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { @@ -400,7 +405,7 @@ func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { for _, xattr := range []string{"security.capability", "security.ima"} { capability, err := system.Lgetxattr(path, xattr) if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform { - return errors.Wrapf(err, "failed to read %q attribute from %q", xattr, path) + return fmt.Errorf("failed to read %q attribute from %q: %w", xattr, path, err) } if capability != nil { hdr.Xattrs[xattr] = string(capability) @@ -478,7 +483,7 @@ func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer, chownOpts * } // canonicalTarName provides a 
platform-independent and consistent posix-style -//path for files and directories to be archived regardless of the platform. +// path for files and directories to be archived regardless of the platform. func canonicalTarName(name string, isDir bool) (string, error) { name, err := CanonicalTarNameForPath(name) if err != nil { @@ -507,6 +512,10 @@ func (ta *tarAppender) addTarFile(path, name string) error { return err } } + if fi.Mode()&os.ModeSocket != 0 { + logrus.Warnf("archive: skipping %q since it is a socket", path) + return nil + } hdr, err := FileInfoHeader(name, fi, link) if err != nil { @@ -518,6 +527,9 @@ func (ta *tarAppender) addTarFile(path, name string) error { if err := ReadUserXattrToTarHeader(path, hdr); err != nil { return err } + if err := ReadFileFlagsToTarHeader(path, hdr); err != nil { + return err + } if ta.CopyPass { copyPassHeader(hdr) } @@ -645,10 +657,13 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L } file.Close() - case tar.TypeBlock, tar.TypeChar, tar.TypeFifo: + case tar.TypeBlock, tar.TypeChar: if inUserns { // cannot create devices in a userns + logrus.Debugf("Tar: Can't create device %v while running in user namespace", path) return nil } + fallthrough + case tar.TypeFifo: // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err @@ -660,7 +675,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) } - if err := os.Link(targetPath, path); err != nil { + if err := handleLLink(targetPath, path); err != nil { return err } @@ -686,9 +701,9 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) } - if forceMask != nil && hdr.Typeflag != tar.TypeSymlink { + if forceMask != nil && (hdr.Typeflag != tar.TypeSymlink || runtime.GOOS == "darwin") { value := fmt.Sprintf("%d:%d:0%o", hdr.Uid, hdr.Gid, hdrInfo.Mode()&07777) - if err := system.Lsetxattr(path, containersOverrideXattr, []byte(value), 0); err != nil { + if err := system.Lsetxattr(path, idtools.ContainersOverrideXattr, []byte(value), 0); err != nil { return err } } @@ -740,6 +755,9 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L var errs []string for key, value := range hdr.Xattrs { + if _, found := xattrsToIgnore[key]; found { + continue + } if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { if errors.Is(err, syscall.ENOTSUP) || (inUserns && errors.Is(err, syscall.EPERM)) { // We ignore errors here because not all graphdrivers support @@ -755,6 +773,15 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L } + // We defer setting flags on directories until the end of + // Unpack or UnpackLayer in case setting them makes the + // directory immutable. 
+ if hdr.Typeflag != tar.TypeDir { + if err := WriteFileFlagsFromTarHeader(path, hdr); err != nil { + return err + } + } + if len(errs) > 0 { logrus.WithFields(logrus.Fields{ "errors": errs, @@ -849,17 +876,17 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) rebaseName := options.RebaseNames[include] walkRoot := getWalkRoot(srcPath, include) - filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { + if err := filepath.WalkDir(walkRoot, func(filePath string, d fs.DirEntry, err error) error { if err != nil { logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) return nil } relFilePath, err := filepath.Rel(srcPath, filePath) - if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { + if err != nil || (!options.IncludeSourceDir && relFilePath == "." && d.IsDir()) { // Error getting relative path OR we are looking // at the source directory path. Skip in both situations. - return nil + return nil //nolint: nilerr } if options.IncludeSourceDir && include == "." && relFilePath != "." { @@ -876,8 +903,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) if include != relFilePath { matches, err := pm.IsMatch(relFilePath) if err != nil { - logrus.Errorf("Error matching %s: %v", relFilePath, err) - return err + return fmt.Errorf("matching %s: %w", relFilePath, err) } skip = matches } @@ -889,7 +915,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) // dir. If so then we can't skip this dir. // Its not a dir then so we can just return/skip. - if !f.IsDir() { + if !d.IsDir() { return nil } @@ -940,7 +966,10 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) } } return nil - }) + }); err != nil { + logrus.Errorf("%s", err) + return + } } }() @@ -959,11 +988,14 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err whiteoutConverter := GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData) buffer := make([]byte, 1<<20) + doChown := !options.NoLchown if options.ForceMask != nil { + // if ForceMask is in place, make sure lchown is disabled. + doChown = false uid, gid, mode, err := GetFileOwner(dest) if err == nil { value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode) - if err := system.Lsetxattr(dest, containersOverrideXattr, []byte(value), 0); err != nil { + if err := system.Lsetxattr(dest, idtools.ContainersOverrideXattr, []byte(value), 0); err != nil { return err } } @@ -1064,7 +1096,7 @@ loop: chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid} } - if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, chownOpts, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil { + if err = createTarFile(path, dest, hdr, trBuf, doChown, chownOpts, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil { return err } @@ -1081,6 +1113,9 @@ loop: if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { return err } + if err := WriteFileFlagsFromTarHeader(path, hdr); err != nil { + return err + } } return nil } @@ -1088,7 +1123,9 @@ loop: // Untar reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive may be compressed with one of the following algorithms: -// identity (uncompressed), gzip, bzip2, xz. +// +// identity (uncompressed), gzip, bzip2, xz. +// // FIXME: specify behavior when target path exists vs. doesn't exist. 
func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, true) @@ -1104,7 +1141,7 @@ func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) e // Handler for teasing out the automatic decompression func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { if tarArchive == nil { - return fmt.Errorf("Empty archive") + return fmt.Errorf("empty archive") } dest = filepath.Clean(dest) if options == nil { @@ -1140,7 +1177,7 @@ func (archiver *Archiver) TarUntar(src, dst string) error { GIDMaps: tarMappings.GIDs(), Compression: Uncompressed, CopyPass: true, - InUserNS: rsystem.RunningInUserNS(), + InUserNS: unshare.IsRootless(), } archive, err := TarWithOptions(src, options) if err != nil { @@ -1155,7 +1192,7 @@ func (archiver *Archiver) TarUntar(src, dst string) error { UIDMaps: untarMappings.UIDs(), GIDMaps: untarMappings.GIDs(), ChownOpts: archiver.ChownOpts, - InUserNS: rsystem.RunningInUserNS(), + InUserNS: unshare.IsRootless(), } return archiver.Untar(archive, dst, options) } @@ -1175,7 +1212,7 @@ func (archiver *Archiver) UntarPath(src, dst string) error { UIDMaps: untarMappings.UIDs(), GIDMaps: untarMappings.GIDs(), ChownOpts: archiver.ChownOpts, - InUserNS: rsystem.RunningInUserNS(), + InUserNS: unshare.IsRootless(), } return archiver.Untar(archive, dst, options) } @@ -1220,7 +1257,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { } if srcSt.IsDir() { - return fmt.Errorf("Can't copy a directory") + return fmt.Errorf("can't copy a directory") } // Clean up the trailing slash. This must be done in an operating @@ -1275,7 +1312,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { UIDMaps: archiver.UntarIDMappings.UIDs(), GIDMaps: archiver.UntarIDMappings.GIDs(), ChownOpts: archiver.ChownOpts, - InUserNS: rsystem.RunningInUserNS(), + InUserNS: unshare.IsRootless(), NoOverwriteDirNonDir: true, } err = archiver.Untar(r, filepath.Dir(dst), options) @@ -1295,6 +1332,21 @@ func remapIDs(readIDMappings, writeIDMappings *idtools.IDMappings, chownOpts *id if err != nil { return err } + } else if runtime.GOOS == darwin { + uid, gid = hdr.Uid, hdr.Gid + if xstat, ok := hdr.Xattrs[idtools.ContainersOverrideXattr]; ok { + attrs := strings.Split(string(xstat), ":") + if len(attrs) == 3 { + val, err := strconv.ParseUint(attrs[0], 10, 32) + if err != nil { + uid = int(val) + } + val, err = strconv.ParseUint(attrs[1], 10, 32) + if err != nil { + gid = int(val) + } + } + } } else { uid, gid = hdr.Uid, hdr.Gid } @@ -1314,7 +1366,7 @@ func remapIDs(readIDMappings, writeIDMappings *idtools.IDMappings, chownOpts *id // of that file as an archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. 
func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { - f, err := ioutil.TempFile(dir, "") + f, err := os.CreateTemp(dir, "") if err != nil { return nil, err } @@ -1416,7 +1468,7 @@ func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap archiver.Untar = func(tarArchive io.Reader, dest string, options *TarOptions) error { contentReader, contentWriter, err := os.Pipe() if err != nil { - return errors.Wrapf(err, "error creating pipe extract data to %q", dest) + return fmt.Errorf("creating pipe extract data to %q: %w", dest, err) } defer contentReader.Close() defer contentWriter.Close() @@ -1435,11 +1487,11 @@ func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap hashWorker.Done() }() if err = originalUntar(io.TeeReader(tarArchive, contentWriter), dest, options); err != nil { - err = errors.Wrapf(err, "error extracting data to %q while copying", dest) + err = fmt.Errorf("extracting data to %q while copying: %w", dest, err) } hashWorker.Wait() - if err == nil { - err = errors.Wrapf(hashError, "error calculating digest of data for %q while copying", dest) + if err == nil && hashError != nil { + err = fmt.Errorf("calculating digest of data for %q while copying: %w", dest, hashError) } return err } diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_110.go b/vendor/github.com/containers/storage/pkg/archive/archive_110.go index 7bc44a566..eab9da51a 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_110.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_110.go @@ -1,3 +1,4 @@ +//go:build go1.10 // +build go1.10 package archive diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_19.go b/vendor/github.com/containers/storage/pkg/archive/archive_19.go index d19811fdb..f591bf389 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_19.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_19.go @@ -1,3 +1,4 @@ +//go:build !go1.10 // +build !go1.10 package archive diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_bsd.go b/vendor/github.com/containers/storage/pkg/archive/archive_bsd.go new file mode 100644 index 000000000..4d362f075 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_bsd.go @@ -0,0 +1,19 @@ +//go:build freebsd || darwin +// +build freebsd darwin + +package archive + +import ( + "archive/tar" + "os" + + "golang.org/x/sys/unix" +) + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error { + permissionsMask := hdrInfo.Mode() + if forceMask != nil { + permissionsMask = *forceMask + } + return unix.Fchmodat(unix.AT_FDCWD, path, uint32(permissionsMask), unix.AT_SYMLINK_NOFOLLOW) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_freebsd.go b/vendor/github.com/containers/storage/pkg/archive/archive_freebsd.go deleted file mode 100644 index 1953b4051..000000000 --- a/vendor/github.com/containers/storage/pkg/archive/archive_freebsd.go +++ /dev/null @@ -1,125 +0,0 @@ -// +build freebsd - -package archive - -import ( - "archive/tar" - "errors" - "os" - "path/filepath" - "syscall" - - "github.com/containers/storage/pkg/idtools" - "github.com/containers/storage/pkg/system" - rsystem "github.com/opencontainers/runc/libcontainer/system" - "golang.org/x/sys/unix" -) - -// fixVolumePathPrefix does platform specific processing to ensure that if -// the path being passed in is not in a volume path format, convert it to one. 
-func fixVolumePathPrefix(srcPath string) string { - return srcPath -} - -// getWalkRoot calculates the root path when performing a TarWithOptions. -// We use a separate function as this is platform specific. On Linux, we -// can't use filepath.Join(srcPath,include) because this will clean away -// a trailing "." or "/" which may be important. -func getWalkRoot(srcPath string, include string) string { - return srcPath + string(filepath.Separator) + include -} - -// CanonicalTarNameForPath returns platform-specific filepath -// to canonical posix-style path for tar archival. p is relative -// path. -func CanonicalTarNameForPath(p string) (string, error) { - return p, nil // already unix-style -} - -// chmodTarEntry is used to adjust the file permissions used in tar header based -// on the platform the archival is done. -func chmodTarEntry(perm os.FileMode) os.FileMode { - return perm // noop for unix as golang APIs provide perm bits correctly -} - -func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { - s, ok := stat.(*syscall.Stat_t) - - if ok { - // Currently go does not fill in the major/minors - if s.Mode&unix.S_IFBLK != 0 || - s.Mode&unix.S_IFCHR != 0 { - hdr.Devmajor = int64(major(uint64(s.Rdev))) // nolint: unconvert - hdr.Devminor = int64(minor(uint64(s.Rdev))) // nolint: unconvert - } - } - - return -} - -func getInodeFromStat(stat interface{}) (inode uint64, err error) { - s, ok := stat.(*syscall.Stat_t) - - if ok { - inode = s.Ino - } - - return -} - -func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { - s, ok := stat.(*syscall.Stat_t) - - if !ok { - return idtools.IDPair{}, errors.New("cannot convert stat value to syscall.Stat_t") - } - return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil -} - -func major(device uint64) uint64 { - return (device >> 8) & 0xfff -} - -func minor(device uint64) uint64 { - return (device & 0xff) | ((device >> 12) & 0xfff00) -} - -// handleTarTypeBlockCharFifo is an OS-specific helper function used by -// createTarFile to handle the following types of header: Block; Char; Fifo -func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { - if rsystem.RunningInUserNS() { - // cannot create a device if running in user namespace - return nil - } - - mode := uint32(hdr.Mode & 07777) - switch hdr.Typeflag { - case tar.TypeBlock: - mode |= unix.S_IFBLK - case tar.TypeChar: - mode |= unix.S_IFCHR - case tar.TypeFifo: - mode |= unix.S_IFIFO - } - - return system.Mknod(path, mode, uint64(system.Mkdev(hdr.Devmajor, hdr.Devminor))) -} - -func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error { - permissionsMask := hdrInfo.Mode() - if forceMask != nil { - permissionsMask = *forceMask - } - if hdr.Typeflag == tar.TypeLink { - if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { - if err := os.Chmod(path, permissionsMask); err != nil { - return err - } - } - } else if hdr.Typeflag != tar.TypeSymlink { - if err := os.Chmod(path, permissionsMask); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go index 2f548b661..36e5d4bc2 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go @@ -36,7 +36,7 @@ func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi // we just 
rename the file and make it normal dir, filename := filepath.Split(hdr.Name) hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename) - hdr.Mode = 0600 + hdr.Mode = 0 hdr.Typeflag = tar.TypeReg hdr.Size = 0 } @@ -189,3 +189,22 @@ func GetFileOwner(path string) (uint32, uint32, uint32, error) { } return 0, 0, uint32(f.Mode()), nil } + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error { + permissionsMask := hdrInfo.Mode() + if forceMask != nil { + permissionsMask = *forceMask + } + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := os.Chmod(path, permissionsMask); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := os.Chmod(path, permissionsMask); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_other.go b/vendor/github.com/containers/storage/pkg/archive/archive_other.go index 4b8834444..2468ab3ca 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_other.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_other.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package archive diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go index 5438700ab..f8a34c831 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go @@ -1,4 +1,5 @@ -// +build !windows,!freebsd +//go:build !windows +// +build !windows package archive @@ -11,7 +12,6 @@ import ( "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/system" - rsystem "github.com/opencontainers/runc/libcontainer/system" "golang.org/x/sys/unix" ) @@ -50,8 +50,8 @@ func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) ( // Currently go does not fill in the major/minors if s.Mode&unix.S_IFBLK != 0 || s.Mode&unix.S_IFCHR != 0 { - hdr.Devmajor = int64(major(uint64(s.Rdev))) // nolint: unconvert - hdr.Devminor = int64(minor(uint64(s.Rdev))) // nolint: unconvert + hdr.Devmajor = int64(major(uint64(s.Rdev))) //nolint: unconvert + hdr.Devminor = int64(minor(uint64(s.Rdev))) //nolint: unconvert } } @@ -88,11 +88,6 @@ func minor(device uint64) uint64 { // handleTarTypeBlockCharFifo is an OS-specific helper function used by // createTarFile to handle the following types of header: Block; Char; Fifo func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { - if rsystem.RunningInUserNS() { - // cannot create a device if running in user namespace - return nil - } - mode := uint32(hdr.Mode & 07777) switch hdr.Typeflag { case tar.TypeBlock: @@ -103,24 +98,15 @@ func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { mode |= unix.S_IFIFO } - return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))) + return system.Mknod(path, mode, system.Mkdev(hdr.Devmajor, hdr.Devminor)) } -func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error { - permissionsMask := hdrInfo.Mode() - if forceMask != nil { - permissionsMask = *forceMask - } - if hdr.Typeflag == tar.TypeLink { - if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { - if err := os.Chmod(path, permissionsMask); err != nil { - return err - } - } - } else if hdr.Typeflag != tar.TypeSymlink { - if err 
:= os.Chmod(path, permissionsMask); err != nil { - return err - } - } - return nil +// Hardlink without symlinks +func handleLLink(targetPath, path string) error { + // Note: on Linux, the link syscall will not follow symlinks. + // This behavior is implementation-dependent since + // POSIX.1-2008 so to make it clear that we need non-symlink + // following here we use the linkat syscall which has a flags + // field to select symlink following or not. + return unix.Linkat(unix.AT_FDCWD, targetPath, unix.AT_FDCWD, path, 0) } diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go index a0872444f..e44011775 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package archive @@ -34,7 +35,7 @@ func CanonicalTarNameForPath(p string) (string, error) { // in file names, it is mostly safe to replace however we must // check just in case if strings.Contains(p, "/") { - return "", fmt.Errorf("Windows path contains forward slash: %s", p) + return "", fmt.Errorf("windows path contains forward slash: %s", p) } return strings.Replace(p, string(os.PathSeparator), "/", -1), nil @@ -77,3 +78,8 @@ func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { // no notion of file ownership mapping yet on Windows return idtools.IDPair{0, 0}, nil } + +// Hardlink without following symlinks +func handleLLink(targetPath string, path string) error { + return os.Link(targetPath, path) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/changes.go b/vendor/github.com/containers/storage/pkg/archive/changes.go index c7bb25d0f..fc705484e 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes.go @@ -5,7 +5,6 @@ import ( "bytes" "fmt" "io" - "io/ioutil" "os" "path/filepath" "reflect" @@ -57,7 +56,7 @@ func (change *Change) String() string { return fmt.Sprintf("%s %s", change.Kind, change.Path) } -// for sort.Sort +// changesByPath implements sort.Interface. 
type changesByPath []Change func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } @@ -403,7 +402,7 @@ func ChangesDirs(newDir string, newMappings *idtools.IDMappings, oldDir string, oldRoot, newRoot *FileInfo ) if oldDir == "" { - emptyDir, err := ioutil.TempDir("", "empty") + emptyDir, err := os.MkdirTemp("", "empty") if err != nil { return nil, err } diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_other.go b/vendor/github.com/containers/storage/pkg/archive/changes_other.go index bbbd8c9de..c27930e97 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes_other.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes_other.go @@ -1,9 +1,11 @@ +//go:build !linux // +build !linux package archive import ( "fmt" + "io/fs" "os" "path/filepath" "runtime" @@ -41,7 +43,12 @@ func collectFileInfoForChanges(oldDir, newDir string, oldIDMap, newIDMap *idtool func collectFileInfo(sourceDir string, idMappings *idtools.IDMappings) (*FileInfo, error) { root := newRootFileInfo(idMappings) - err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { + sourceStat, err := system.Lstat(sourceDir) + if err != nil { + return nil, err + } + + err = filepath.WalkDir(sourceDir, func(path string, d fs.DirEntry, err error) error { if err != nil { return err } @@ -84,8 +91,12 @@ func collectFileInfo(sourceDir string, idMappings *idtools.IDMappings) (*FileInf if err != nil { return err } - info.stat = s + if s.Dev() != sourceStat.Dev() { + return filepath.SkipDir + } + + info.stat = s info.capability, _ = system.Lgetxattr(path, "security.capability") parent.children[info.name] = info diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_unix.go b/vendor/github.com/containers/storage/pkg/archive/changes_unix.go index 1cc1910f8..6b2e59380 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes_unix.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package archive @@ -29,6 +30,7 @@ func statDifferent(oldStat *system.StatT, oldInfo *FileInfo, newStat *system.Sta if oldStat.Mode() != newStat.Mode() || ownerChanged || oldStat.Rdev() != newStat.Rdev() || + oldStat.Flags() != newStat.Flags() || // Don't look at size for dirs, its not a good measure of change (oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR && (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) { diff --git a/vendor/github.com/containers/storage/pkg/archive/copy.go b/vendor/github.com/containers/storage/pkg/archive/copy.go index 6298a674d..2c714e8da 100644 --- a/vendor/github.com/containers/storage/pkg/archive/copy.go +++ b/vendor/github.com/containers/storage/pkg/archive/copy.go @@ -4,7 +4,6 @@ import ( "archive/tar" "errors" "io" - "io/ioutil" "os" "path/filepath" "strings" @@ -255,7 +254,7 @@ func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir // The destination exists as a directory. No alteration // to srcContent is needed as its contents can be // simply extracted to the destination directory. - return dstInfo.Path, ioutil.NopCloser(srcContent), nil + return dstInfo.Path, io.NopCloser(srcContent), nil case dstInfo.Exists && srcInfo.IsDir: // The destination exists as some type of file and the source // content is a directory. 
This is an error condition since diff --git a/vendor/github.com/containers/storage/pkg/archive/copy_unix.go b/vendor/github.com/containers/storage/pkg/archive/copy_unix.go index e305b5e4a..d6c5fd98b 100644 --- a/vendor/github.com/containers/storage/pkg/archive/copy_unix.go +++ b/vendor/github.com/containers/storage/pkg/archive/copy_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package archive diff --git a/vendor/github.com/containers/storage/pkg/archive/diff.go b/vendor/github.com/containers/storage/pkg/archive/diff.go index 14ffad5c0..8fec5af38 100644 --- a/vendor/github.com/containers/storage/pkg/archive/diff.go +++ b/vendor/github.com/containers/storage/pkg/archive/diff.go @@ -4,7 +4,7 @@ import ( "archive/tar" "fmt" "io" - "io/ioutil" + "io/fs" "os" "path/filepath" "runtime" @@ -85,7 +85,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = os.MkdirAll(parentPath, 0600) + err = os.MkdirAll(parentPath, 0755) if err != nil { return 0, err } @@ -101,7 +101,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, basename := filepath.Base(hdr.Name) aufsHardlinks[basename] = hdr if aufsTempdir == "" { - if aufsTempdir, err = ioutil.TempDir("", "storageplnk"); err != nil { + if aufsTempdir, err = os.MkdirTemp("", "storageplnk"); err != nil { return 0, err } defer os.RemoveAll(aufsTempdir) @@ -134,7 +134,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, if err != nil { return 0, err } - err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + err = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error { if err != nil { if os.IsNotExist(err) { err = nil // parent was deleted @@ -145,6 +145,9 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, return nil } if _, exists := unpackedPaths[path]; !exists { + if err := resetImmutable(path, nil); err != nil { + return err + } err := os.RemoveAll(path) return err } @@ -156,6 +159,9 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, } else { originalBase := base[len(WhiteoutPrefix):] originalPath := filepath.Join(dir, originalBase) + if err := resetImmutable(originalPath, nil); err != nil { + return 0, err + } if err := os.RemoveAll(originalPath); err != nil { return 0, err } @@ -165,7 +171,15 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, // The only exception is when it is a directory *and* the file from // the layer is also a directory. Then we want to merge them (i.e. // just apply the metadata from the layer). + // + // We always reset the immutable flag (if present) to allow metadata + // changes and to allow directory modification. The flag will be + // re-applied based on the contents of hdr either at the end for + // directories or in createTarFile otherwise. 
if fi, err := os.Lstat(path); err == nil { + if err := resetImmutable(path, &fi); err != nil { + return 0, err + } if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { if err := os.RemoveAll(path); err != nil { return 0, err @@ -183,7 +197,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, linkBasename := filepath.Base(hdr.Linkname) srcHdr = aufsHardlinks[linkBasename] if srcHdr == nil { - return 0, fmt.Errorf("Invalid aufs hardlink") + return 0, fmt.Errorf("invalid aufs hardlink") } tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) if err != nil { @@ -215,6 +229,9 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { return 0, err } + if err := WriteFileFlagsFromTarHeader(path, hdr); err != nil { + return 0, err + } } return size, nil @@ -245,7 +262,9 @@ func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decomp if err != nil { return 0, err } - defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform + defer func() { + _, _ = system.Umask(oldmask) // Ignore err. This can only fail with ErrNotSupportedPlatform, in which case we would have failed above. + }() if decompress { layer, err = DecompressStream(layer) diff --git a/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go b/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go new file mode 100644 index 000000000..14661c411 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go @@ -0,0 +1,167 @@ +//go:build freebsd +// +build freebsd + +package archive + +import ( + "archive/tar" + "fmt" + "math/bits" + "os" + "strings" + "syscall" + + "github.com/containers/storage/pkg/system" +) + +const ( + paxSCHILYFflags = "SCHILY.fflags" +) + +var ( + flagNameToValue = map[string]uint32{ + "sappnd": system.SF_APPEND, + "sappend": system.SF_APPEND, + "arch": system.SF_ARCHIVED, + "archived": system.SF_ARCHIVED, + "schg": system.SF_IMMUTABLE, + "schange": system.SF_IMMUTABLE, + "simmutable": system.SF_IMMUTABLE, + "sunlnk": system.SF_NOUNLINK, + "sunlink": system.SF_NOUNLINK, + "snapshot": system.SF_SNAPSHOT, + "uappnd": system.UF_APPEND, + "uappend": system.UF_APPEND, + "uarch": system.UF_ARCHIVE, + "uarchive": system.UF_ARCHIVE, + "hidden": system.UF_HIDDEN, + "uhidden": system.UF_HIDDEN, + "uchg": system.UF_IMMUTABLE, + "uchange": system.UF_IMMUTABLE, + "uimmutable": system.UF_IMMUTABLE, + "uunlnk": system.UF_NOUNLINK, + "uunlink": system.UF_NOUNLINK, + "offline": system.UF_OFFLINE, + "uoffline": system.UF_OFFLINE, + "opaque": system.UF_OPAQUE, + "rdonly": system.UF_READONLY, + "urdonly": system.UF_READONLY, + "readonly": system.UF_READONLY, + "ureadonly": system.UF_READONLY, + "reparse": system.UF_REPARSE, + "ureparse": system.UF_REPARSE, + "sparse": system.UF_SPARSE, + "usparse": system.UF_SPARSE, + "system": system.UF_SYSTEM, + "usystem": system.UF_SYSTEM, + } + // Only include the short names for the reverse map + flagValueToName = map[uint32]string{ + system.SF_APPEND: "sappnd", + system.SF_ARCHIVED: "arch", + system.SF_IMMUTABLE: "schg", + system.SF_NOUNLINK: "sunlnk", + system.SF_SNAPSHOT: "snapshot", + system.UF_APPEND: "uappnd", + system.UF_ARCHIVE: "uarch", + system.UF_HIDDEN: "hidden", + system.UF_IMMUTABLE: "uchg", + system.UF_NOUNLINK: "uunlnk", + system.UF_OFFLINE: "offline", + system.UF_OPAQUE: "opaque", + system.UF_READONLY: "rdonly", + system.UF_REPARSE: "reparse", + system.UF_SPARSE: "sparse", + 
system.UF_SYSTEM: "system", + } +) + +func parseFileFlags(fflags string) (uint32, uint32, error) { + var set, clear uint32 = 0, 0 + for _, fflag := range strings.Split(fflags, ",") { + isClear := false + if strings.HasPrefix(fflag, "no") { + isClear = true + fflag = strings.TrimPrefix(fflag, "no") + } + if value, ok := flagNameToValue[fflag]; ok { + if isClear { + clear |= value + } else { + set |= value + } + } else { + return 0, 0, fmt.Errorf("parsing file flags, unrecognised token: %s", fflag) + } + } + return set, clear, nil +} + +func formatFileFlags(fflags uint32) (string, error) { + var res = []string{} + for fflags != 0 { + // Extract lowest set bit + fflag := uint32(1) << bits.TrailingZeros32(fflags) + if name, ok := flagValueToName[fflag]; ok { + res = append(res, name) + } else { + return "", fmt.Errorf("formatting file flags, unrecognised flag: %x", fflag) + } + fflags &= ^fflag + } + return strings.Join(res, ","), nil +} + +func ReadFileFlagsToTarHeader(path string, hdr *tar.Header) error { + st, err := system.Lstat(path) + if err != nil { + return err + } + fflags, err := formatFileFlags(st.Flags()) + if err != nil { + return err + } + if fflags != "" { + if hdr.PAXRecords == nil { + hdr.PAXRecords = map[string]string{} + } + hdr.PAXRecords[paxSCHILYFflags] = fflags + } + return nil +} + +func WriteFileFlagsFromTarHeader(path string, hdr *tar.Header) error { + if fflags, ok := hdr.PAXRecords[paxSCHILYFflags]; ok { + var set, clear uint32 + set, clear, err := parseFileFlags(fflags) + if err != nil { + return err + } + + // Apply the delta to the existing file flags + st, err := system.Lstat(path) + if err != nil { + return err + } + return system.Lchflags(path, (st.Flags() & ^clear)|set) + } + return nil +} + +func resetImmutable(path string, fi *os.FileInfo) error { + var flags uint32 + if fi != nil { + flags = (*fi).Sys().(*syscall.Stat_t).Flags + } else { + st, err := system.Lstat(path) + if err != nil { + return err + } + flags = st.Flags() + } + if flags&(system.SF_IMMUTABLE|system.UF_IMMUTABLE) != 0 { + flags &= ^(system.SF_IMMUTABLE | system.UF_IMMUTABLE) + return system.Lchflags(path, flags) + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/fflags_unsupported.go b/vendor/github.com/containers/storage/pkg/archive/fflags_unsupported.go new file mode 100644 index 000000000..27a0bae2b --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/fflags_unsupported.go @@ -0,0 +1,21 @@ +//go:build !freebsd +// +build !freebsd + +package archive + +import ( + "archive/tar" + "os" +) + +func ReadFileFlagsToTarHeader(path string, hdr *tar.Header) error { + return nil +} + +func WriteFileFlagsFromTarHeader(path string, hdr *tar.Header) error { + return nil +} + +func resetImmutable(path string, fi *os.FileInfo) error { + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go b/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go index e85aac054..8db64e804 100644 --- a/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package archive diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go index aacfee76f..2de95f39a 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go +++ 
b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go @@ -4,26 +4,15 @@ import ( stdtar "archive/tar" "fmt" "io" - "io/ioutil" - "net" "os" - "os/user" "path/filepath" "sync" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/idtools" - rsystem "github.com/opencontainers/runc/libcontainer/system" - "github.com/pkg/errors" + "github.com/containers/storage/pkg/unshare" ) -func init() { - // initialize nss libraries in Glibc so that the dynamic libraries are loaded in the host - // environment not in the chroot from untrusted files. - _, _ = user.Lookup("storage") - _, _ = net.LookupHost("localhost") -} - // NewArchiver returns a new Archiver which uses chrootarchive.Untar func NewArchiver(idMappings *idtools.IDMappings) *archive.Archiver { archiver := archive.NewArchiver(idMappings) @@ -41,7 +30,8 @@ func NewArchiverWithChown(tarIDMappings *idtools.IDMappings, chownOpts *idtools. // Untar reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive may be compressed with one of the following algorithms: -// identity (uncompressed), gzip, bzip2, xz. +// +// identity (uncompressed), gzip, bzip2, xz. func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error { return untarHandler(tarArchive, dest, options, true, dest) } @@ -72,11 +62,11 @@ func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOp // Handler for teasing out the automatic decompression func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool, root string) error { if tarArchive == nil { - return fmt.Errorf("Empty archive") + return fmt.Errorf("empty archive") } if options == nil { options = &archive.TarOptions{} - options.InUserNS = rsystem.RunningInUserNS() + options.InUserNS = unshare.IsRootless() } if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} @@ -92,7 +82,7 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions } } - r := ioutil.NopCloser(tarArchive) + r := tarArchive if decompress { decompressedArchive, err := archive.DecompressStream(tarArchive) if err != nil { @@ -124,7 +114,7 @@ func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error { contentReader, contentWriter, err := os.Pipe() if err != nil { - return errors.Wrapf(err, "error creating pipe extract data to %q", dest) + return fmt.Errorf("creating pipe extract data to %q: %w", dest, err) } defer contentReader.Close() defer contentWriter.Close() @@ -143,11 +133,11 @@ func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap hashWorker.Done() }() if err = originalUntar(io.TeeReader(tarArchive, contentWriter), dest, options); err != nil { - err = errors.Wrapf(err, "error extracting data to %q while copying", dest) + err = fmt.Errorf("extracting data to %q while copying: %w", dest, err) } hashWorker.Wait() - if err == nil { - err = errors.Wrapf(hashError, "error calculating digest of data for %q while copying", dest) + if err == nil && hashError != nil { + err = fmt.Errorf("calculating digest of data for %q while copying: %w", dest, hashError) } return err } diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go new file mode 100644 index 000000000..42ee39f48 --- /dev/null +++ 
b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go @@ -0,0 +1,17 @@ +package chrootarchive + +import ( + "io" + + "github.com/containers/storage/pkg/archive" +) + +func invokeUnpack(decompressedArchive io.Reader, + dest string, + options *archive.TarOptions, root string) error { + return archive.Unpack(decompressedArchive, dest, options) +} + +func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) { + return archive.TarWithOptions(srcPath, options) +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go index 9da10fe33..8cc0f33b3 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go @@ -1,13 +1,14 @@ -// +build !windows +//go:build !windows && !darwin +// +build !windows,!darwin package chrootarchive import ( "bytes" + "errors" "flag" "fmt" "io" - "io/ioutil" "os" "path/filepath" "runtime" @@ -15,7 +16,6 @@ import ( "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/reexec" - "github.com/pkg/errors" ) // untar is the entry-point for storage-untar on re-exec. This is not used on @@ -69,7 +69,7 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T // child r, w, err := os.Pipe() if err != nil { - return fmt.Errorf("Untar pipe failure: %v", err) + return fmt.Errorf("untar pipe failure: %w", err) } if root != "" { @@ -96,13 +96,13 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T if err := cmd.Start(); err != nil { w.Close() - return fmt.Errorf("Untar error on re-exec cmd: %v", err) + return fmt.Errorf("untar error on re-exec cmd: %w", err) } //write the options to the pipe for the untar exec to read if err := json.NewEncoder(w).Encode(options); err != nil { w.Close() - return fmt.Errorf("Untar json encode to pipe failed: %v", err) + return fmt.Errorf("untar json encode to pipe failed: %w", err) } w.Close() @@ -110,9 +110,9 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T // when `xz -d -c -q | storage-untar ...` failed on storage-untar side, // we need to exhaust `xz`'s output, otherwise the `xz` side will be // pending on write pipe forever - io.Copy(ioutil.Discard, decompressedArchive) + io.Copy(io.Discard, decompressedArchive) - return fmt.Errorf("Error processing tar file(%v): %s", err, output) + return fmt.Errorf("processing tar file(%s): %w", output, err) } return nil } @@ -184,22 +184,24 @@ func invokePack(srcPath string, options *archive.TarOptions, root string) (io.Re stdin, err := cmd.StdinPipe() if err != nil { - return nil, errors.Wrap(err, "error getting options pipe for tar process") + return nil, fmt.Errorf("getting options pipe for tar process: %w", err) } if err := cmd.Start(); err != nil { - return nil, errors.Wrap(err, "tar error on re-exec cmd") + return nil, fmt.Errorf("tar error on re-exec cmd: %w", err) } go func() { err := cmd.Wait() - err = errors.Wrapf(err, "error processing tar file: %s", errBuff) + if err != nil { + err = fmt.Errorf("processing tar file(%s): %w", errBuff, err) + } tarW.CloseWithError(err) }() if err := json.NewEncoder(stdin).Encode(options); err != nil { stdin.Close() - return nil, errors.Wrap(err, "tar json encode to pipe failed") + return nil, fmt.Errorf("tar json encode to pipe failed: %w", err) } stdin.Close() diff --git 
a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go index 8a5c680b1..1395ff8cd 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go @@ -12,7 +12,7 @@ func chroot(path string) error { return nil } -func invokeUnpack(decompressedArchive io.ReadCloser, +func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions, root string) error { // Windows is different to Linux here because Windows does not support diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go index 76c94c6c1..09ef6d5de 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go @@ -2,8 +2,9 @@ package chrootarchive import ( "fmt" - "io/ioutil" + "net" "os" + "os/user" "path/filepath" "github.com/containers/storage/pkg/mount" @@ -23,13 +24,18 @@ func chroot(path string) (err error) { return err } + // initialize nss libraries in Glibc so that the dynamic libraries are loaded in the host + // environment not in the chroot from untrusted files. + _, _ = user.Lookup("storage") + _, _ = net.LookupHost("localhost") + // if the process doesn't have CAP_SYS_ADMIN, but does have CAP_SYS_CHROOT, we need to use the actual chroot if !caps.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) && caps.Get(capability.EFFECTIVE, capability.CAP_SYS_CHROOT) { return realChroot(path) } if err := unix.Unshare(unix.CLONE_NEWNS); err != nil { - return fmt.Errorf("Error creating mount namespace before pivot: %v", err) + return fmt.Errorf("creating mount namespace before pivot: %w", err) } // make everything in new ns private @@ -44,9 +50,9 @@ func chroot(path string) (err error) { } // setup oldRoot for pivot_root - pivotDir, err := ioutil.TempDir(path, ".pivot_root") + pivotDir, err := os.MkdirTemp(path, ".pivot_root") if err != nil { - return fmt.Errorf("Error setting up pivot dir: %v", err) + return fmt.Errorf("setting up pivot dir: %w", err) } var mounted bool @@ -65,7 +71,7 @@ func chroot(path string) (err error) { // pivotDir doesn't exist if pivot_root failed and chroot+chdir was successful // because we already cleaned it up on failed pivot_root if errCleanup != nil && !os.IsNotExist(errCleanup) { - errCleanup = fmt.Errorf("Error cleaning up after pivot: %v", errCleanup) + errCleanup = fmt.Errorf("cleaning up after pivot: %w", errCleanup) if err == nil { err = errCleanup } @@ -75,7 +81,7 @@ func chroot(path string) (err error) { if err := unix.PivotRoot(path, pivotDir); err != nil { // If pivot fails, fall back to the normal chroot after cleaning up temp dir if err := os.Remove(pivotDir); err != nil { - return fmt.Errorf("Error cleaning up after failed pivot: %v", err) + return fmt.Errorf("cleaning up after failed pivot: %w", err) } return realChroot(path) } @@ -86,17 +92,17 @@ func chroot(path string) (err error) { pivotDir = filepath.Join("/", filepath.Base(pivotDir)) if err := unix.Chdir("/"); err != nil { - return fmt.Errorf("Error changing to new root: %v", err) + return fmt.Errorf("changing to new root: %w", err) } // Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host if err := unix.Mount("", pivotDir, "", unix.MS_PRIVATE|unix.MS_REC, ""); err 
!= nil { - return fmt.Errorf("Error making old root private after pivot: %v", err) + return fmt.Errorf("making old root private after pivot: %w", err) } // Now unmount the old root so it's no longer visible from the new root if err := unix.Unmount(pivotDir, unix.MNT_DETACH); err != nil { - return fmt.Errorf("Error while unmounting old root after pivot: %v", err) + return fmt.Errorf("while unmounting old root after pivot: %w", err) } mounted = false @@ -105,10 +111,10 @@ func chroot(path string) (err error) { func realChroot(path string) error { if err := unix.Chroot(path); err != nil { - return fmt.Errorf("Error after fallback to chroot: %v", err) + return fmt.Errorf("after fallback to chroot: %w", err) } if err := unix.Chdir("/"); err != nil { - return fmt.Errorf("Error changing to new root after chroot: %v", err) + return fmt.Errorf("changing to new root after chroot: %w", err) } return nil } diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go index 83278ee50..b03e97460 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go @@ -1,4 +1,5 @@ -// +build !windows,!linux +//go:build !windows && !linux && !darwin +// +build !windows,!linux,!darwin package chrootarchive diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_darwin.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_darwin.go new file mode 100644 index 000000000..7b4ea9e51 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_darwin.go @@ -0,0 +1,40 @@ +package chrootarchive + +import ( + "fmt" + "io" + "os" + "path/filepath" + + "github.com/containers/storage/pkg/archive" +) + +// applyLayerHandler parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. Returns the size in bytes of the +// contents of the layer. 
+func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { + dest = filepath.Clean(dest) + + if decompress { + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompressed.Close() + + layer = decompressed + } + + tmpDir, err := os.MkdirTemp(os.Getenv("temp"), "temp-storage-extract") + if err != nil { + return 0, fmt.Errorf("ApplyLayer failed to create temp-storage-extract under %s: %w", dest, err) + } + + s, err := archive.UnpackLayer(dest, layer, options) + os.RemoveAll(tmpDir) + if err != nil { + return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %w", layer, dest, err) + } + + return s, nil +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go index c884d3784..90f453913 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go @@ -1,4 +1,5 @@ -//+build !windows +//go:build !windows && !darwin +// +build !windows,!darwin package chrootarchive @@ -7,7 +8,6 @@ import ( "flag" "fmt" "io" - "io/ioutil" "os" "path/filepath" "runtime" @@ -15,7 +15,7 @@ import ( "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/reexec" "github.com/containers/storage/pkg/system" - rsystem "github.com/opencontainers/runc/libcontainer/system" + "github.com/containers/storage/pkg/unshare" ) type applyLayerResponse struct { @@ -35,7 +35,7 @@ func applyLayer() { runtime.LockOSThread() flag.Parse() - inUserns := rsystem.RunningInUserNS() + inUserns := unshare.IsRootless() if err := chroot(flag.Arg(0)); err != nil { fatal(err) } @@ -55,7 +55,7 @@ func applyLayer() { options.InUserNS = true } - if tmpDir, err = ioutil.TempDir("/", "temp-storage-extract"); err != nil { + if tmpDir, err = os.MkdirTemp("/", "temp-storage-extract"); err != nil { fatal(err) } @@ -68,7 +68,7 @@ func applyLayer() { encoder := json.NewEncoder(os.Stdout) if err := encoder.Encode(applyLayerResponse{size}); err != nil { - fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err)) + fatal(fmt.Errorf("unable to encode layerSize JSON: %w", err)) } if _, err := flush(os.Stdin); err != nil { @@ -94,7 +94,7 @@ func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions } if options == nil { options = &archive.TarOptions{} - if rsystem.RunningInUserNS() { + if unshare.IsRootless() { options.InUserNS = true } } @@ -104,25 +104,25 @@ func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions data, err := json.Marshal(options) if err != nil { - return 0, fmt.Errorf("ApplyLayer json encode: %v", err) + return 0, fmt.Errorf("ApplyLayer json encode: %w", err) } cmd := reexec.Command("storage-applyLayer", dest) cmd.Stdin = layer - cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data)) + cmd.Env = append(os.Environ(), fmt.Sprintf("OPT=%s", data)) outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer) cmd.Stdout, cmd.Stderr = outBuf, errBuf if err = cmd.Run(); err != nil { - return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf) + return 0, fmt.Errorf("ApplyLayer stdout: %s stderr: %s %w", outBuf, errBuf, err) } // Stdout should be a valid JSON struct representing an applyLayerResponse. 
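For reviewers unfamiliar with the reexec protocol this diff_unix.go hunk touches: the parent streams the layer tarball over the child's stdin, hands the unpack options to the child through the OPT environment variable (note the fix above, which now builds on os.Environ() instead of an empty environment), and reads a single JSON object back on the child's stdout. A minimal sketch of the response round trip, using stand-in data rather than a real child process:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

// Mirrors the shape of applyLayerResponse in diff_unix.go: the child
// reports only the unpacked layer size back to the parent.
type applyLayerResponse struct {
	LayerSize int64 `json:"layerSize"`
}

func main() {
	// Stand-in for the child's stdout, captured in outBuf by cmd.Run().
	outBuf := bytes.NewBufferString(`{"layerSize":4096}`)

	var response applyLayerResponse
	if err := json.NewDecoder(outBuf).Decode(&response); err != nil {
		// Wrapping with %w, as this patch now does throughout, keeps the
		// underlying cause inspectable via errors.Is / errors.As.
		panic(fmt.Errorf("unable to decode ApplyLayer JSON response: %w", err))
	}
	fmt.Println(response.LayerSize) // 4096
}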
response := applyLayerResponse{} decoder := json.NewDecoder(outBuf) if err = decoder.Decode(&response); err != nil { - return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err) + return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %w", err) } return response.LayerSize, nil diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go index 8f8e88bfb..8bfff5d65 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go @@ -3,7 +3,6 @@ package chrootarchive import ( "fmt" "io" - "io/ioutil" "os" "path/filepath" @@ -30,7 +29,7 @@ func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions layer = decompressed } - tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-storage-extract") + tmpDir, err := os.MkdirTemp(os.Getenv("temp"), "temp-storage-extract") if err != nil { return 0, fmt.Errorf("ApplyLayer failed to create temp-storage-extract under %s. %s", dest, err) } diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/init_darwin.go b/vendor/github.com/containers/storage/pkg/chrootarchive/init_darwin.go new file mode 100644 index 000000000..fa17c9bf8 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/init_darwin.go @@ -0,0 +1,4 @@ +package chrootarchive + +func init() { +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go index ea08135e4..274a946e2 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go @@ -1,11 +1,11 @@ -// +build !windows +//go:build !windows && !darwin +// +build !windows,!darwin package chrootarchive import ( "fmt" "io" - "io/ioutil" "os" "github.com/containers/storage/pkg/reexec" @@ -25,5 +25,5 @@ func fatal(err error) { // flush consumes all the bytes from the reader discarding // any errors func flush(r io.Reader) (bytes int64, err error) { - return io.Copy(ioutil.Discard, r) + return io.Copy(io.Discard, r) } diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/jsoniter.go b/vendor/github.com/containers/storage/pkg/chrootarchive/jsoniter.go index 63f970456..40eec4dc0 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/jsoniter.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/jsoniter.go @@ -1,3 +1,6 @@ +//go:build !windows && !darwin +// +build !windows,!darwin + package chrootarchive import jsoniter "github.com/json-iterator/go" diff --git a/vendor/github.com/containers/storage/pkg/config/config.go b/vendor/github.com/containers/storage/pkg/config/config.go index b9e2658a7..f6e0cfcfe 100644 --- a/vendor/github.com/containers/storage/pkg/config/config.go +++ b/vendor/github.com/containers/storage/pkg/config/config.go @@ -12,107 +12,109 @@ type ThinpoolOptionsConfig struct { // grown. This is specified in terms of % of pool size. So a value of // 20 means that when threshold is hit, pool will be grown by 20% of // existing pool size. - AutoExtendPercent string `toml:"autoextend_percent"` + AutoExtendPercent string `toml:"autoextend_percent,omitempty"` // AutoExtendThreshold determines the pool extension threshold in terms // of percentage of pool size. For example, if threshold is 60, that // means when pool is 60% full, threshold has been hit. 
- AutoExtendThreshold string `toml:"autoextend_threshold"` + AutoExtendThreshold string `toml:"autoextend_threshold,omitempty"` // BaseSize specifies the size to use when creating the base device, // which limits the size of images and containers. - BaseSize string `toml:"basesize"` + BaseSize string `toml:"basesize,omitempty"` // BlockSize specifies a custom blocksize to use for the thin pool. - BlockSize string `toml:"blocksize"` + BlockSize string `toml:"blocksize,omitempty"` // DirectLvmDevice specifies a custom block storage device to use for // the thin pool. - DirectLvmDevice string `toml:"directlvm_device"` + DirectLvmDevice string `toml:"directlvm_device,omitempty"` // DirectLvmDeviceForcewipes device even if device already has a // filesystem - DirectLvmDeviceForce string `toml:"directlvm_device_force"` + DirectLvmDeviceForce string `toml:"directlvm_device_force,omitempty"` // Fs specifies the filesystem type to use for the base device. - Fs string `toml:"fs"` + Fs string `toml:"fs,omitempty"` // log_level sets the log level of devicemapper. - LogLevel string `toml:"log_level"` + LogLevel string `toml:"log_level,omitempty"` // MetadataSize specifies the size of the metadata for the thinpool // It will be used with the `pvcreate --metadata` option. - MetadataSize string `toml:"metadatasize"` + MetadataSize string `toml:"metadatasize,omitempty"` // MinFreeSpace specifies the min free space percent in a thin pool // require for new device creation to - MinFreeSpace string `toml:"min_free_space"` + MinFreeSpace string `toml:"min_free_space,omitempty"` // MkfsArg specifies extra mkfs arguments to be used when creating the // basedevice. - MkfsArg string `toml:"mkfsarg"` + MkfsArg string `toml:"mkfsarg,omitempty"` // MountOpt specifies extra mount options used when mounting the thin // devices. - MountOpt string `toml:"mountopt"` + MountOpt string `toml:"mountopt,omitempty"` // Size - Size string `toml:"size"` + Size string `toml:"size,omitempty"` // UseDeferredDeletion marks device for deferred deletion - UseDeferredDeletion string `toml:"use_deferred_deletion"` + UseDeferredDeletion string `toml:"use_deferred_deletion,omitempty"` // UseDeferredRemoval marks device for deferred removal - UseDeferredRemoval string `toml:"use_deferred_removal"` + UseDeferredRemoval string `toml:"use_deferred_removal,omitempty"` // XfsNoSpaceMaxRetriesFreeSpace specifies the maximum number of // retries XFS should attempt to complete IO when ENOSPC (no space) // error is returned by underlying storage device. - XfsNoSpaceMaxRetries string `toml:"xfs_nospace_max_retries"` + XfsNoSpaceMaxRetries string `toml:"xfs_nospace_max_retries,omitempty"` } type AufsOptionsConfig struct { // MountOpt specifies extra mount options used when mounting - MountOpt string `toml:"mountopt"` + MountOpt string `toml:"mountopt,omitempty"` } type BtrfsOptionsConfig struct { // MinSpace is the minimal spaces allocated to the device - MinSpace string `toml:"min_space"` + MinSpace string `toml:"min_space,omitempty"` // Size - Size string `toml:"size"` + Size string `toml:"size,omitempty"` } type OverlayOptionsConfig struct { // IgnoreChownErrors is a flag for whether chown errors should be // ignored when building an image. 
- IgnoreChownErrors string `toml:"ignore_chown_errors"` + IgnoreChownErrors string `toml:"ignore_chown_errors,omitempty"` // MountOpt specifies extra mount options used when mounting - MountOpt string `toml:"mountopt"` + MountOpt string `toml:"mountopt,omitempty"` // Alternative program to use for the mount of the file system - MountProgram string `toml:"mount_program"` + MountProgram string `toml:"mount_program,omitempty"` // Size - Size string `toml:"size"` + Size string `toml:"size,omitempty"` + // Inodes is used to set a maximum inodes of the container image. + Inodes string `toml:"inodes,omitempty"` // Do not create a bind mount on the storage home - SkipMountHome string `toml:"skip_mount_home"` + SkipMountHome string `toml:"skip_mount_home,omitempty"` // ForceMask indicates the permissions mask (e.g. "0755") to use for new // files and directories - ForceMask string `toml:"force_mask"` + ForceMask string `toml:"force_mask,omitempty"` } type VfsOptionsConfig struct { // IgnoreChownErrors is a flag for whether chown errors should be // ignored when building an image. - IgnoreChownErrors string `toml:"ignore_chown_errors"` + IgnoreChownErrors string `toml:"ignore_chown_errors,omitempty"` } type ZfsOptionsConfig struct { // MountOpt specifies extra mount options used when mounting - MountOpt string `toml:"mountopt"` + MountOpt string `toml:"mountopt,omitempty"` // Name is the File System name of the ZFS File system - Name string `toml:"fsname"` + Name string `toml:"fsname,omitempty"` // Size - Size string `toml:"size"` + Size string `toml:"size,omitempty"` } // OptionsConfig represents the "storage.options" TOML config table. @@ -120,82 +122,82 @@ type OptionsConfig struct { // AdditionalImagesStores is the location of additional read/only // Image stores. Usually used to access Networked File System // for shared image content - AdditionalImageStores []string `toml:"additionalimagestores"` + AdditionalImageStores []string `toml:"additionalimagestores,omitempty"` // AdditionalLayerStores is the location of additional read/only // Layer stores. Usually used to access Networked File System // for shared image content // This API is experimental and can be changed without bumping the // major version number. - AdditionalLayerStores []string `toml:"additionallayerstores"` + AdditionalLayerStores []string `toml:"additionallayerstores,omitempty"` // Size - Size string `toml:"size"` + Size string `toml:"size,omitempty"` // RemapUIDs is a list of default UID mappings to use for layers. - RemapUIDs string `toml:"remap-uids"` + RemapUIDs string `toml:"remap-uids,omitempty"` // RemapGIDs is a list of default GID mappings to use for layers. - RemapGIDs string `toml:"remap-gids"` + RemapGIDs string `toml:"remap-gids,omitempty"` // IgnoreChownErrors is a flag for whether chown errors should be // ignored when building an image. - IgnoreChownErrors string `toml:"ignore_chown_errors"` + IgnoreChownErrors string `toml:"ignore_chown_errors,omitempty"` // ForceMask indicates the permissions mask (e.g. "0755") to use for new // files and directories. - ForceMask os.FileMode `toml:"force_mask"` + ForceMask os.FileMode `toml:"force_mask,omitempty"` // RemapUser is the name of one or more entries in /etc/subuid which // should be used to set up default UID mappings. - RemapUser string `toml:"remap-user"` + RemapUser string `toml:"remap-user,omitempty"` // RemapGroup is the name of one or more entries in /etc/subgid which // should be used to set up default GID mappings. 
- RemapGroup string `toml:"remap-group"` + RemapGroup string `toml:"remap-group,omitempty"` // RootAutoUsernsUser is the name of one or more entries in /etc/subuid and // /etc/subgid which should be used to set up automatically a userns. - RootAutoUsernsUser string `toml:"root-auto-userns-user"` + RootAutoUsernsUser string `toml:"root-auto-userns-user,omitempty"` // AutoUsernsMinSize is the minimum size for a user namespace that is // created automatically. - AutoUsernsMinSize uint32 `toml:"auto-userns-min-size"` + AutoUsernsMinSize uint32 `toml:"auto-userns-min-size,omitempty"` // AutoUsernsMaxSize is the maximum size for a user namespace that is // created automatically. - AutoUsernsMaxSize uint32 `toml:"auto-userns-max-size"` + AutoUsernsMaxSize uint32 `toml:"auto-userns-max-size,omitempty"` // Aufs container options to be handed to aufs drivers - Aufs struct{ AufsOptionsConfig } `toml:"aufs"` + Aufs struct{ AufsOptionsConfig } `toml:"aufs,omitempty"` // Btrfs container options to be handed to btrfs drivers - Btrfs struct{ BtrfsOptionsConfig } `toml:"btrfs"` + Btrfs struct{ BtrfsOptionsConfig } `toml:"btrfs,omitempty"` // Thinpool container options to be handed to thinpool drivers - Thinpool struct{ ThinpoolOptionsConfig } `toml:"thinpool"` + Thinpool struct{ ThinpoolOptionsConfig } `toml:"thinpool,omitempty"` // Overlay container options to be handed to overlay drivers - Overlay struct{ OverlayOptionsConfig } `toml:"overlay"` + Overlay struct{ OverlayOptionsConfig } `toml:"overlay,omitempty"` // Vfs container options to be handed to VFS drivers - Vfs struct{ VfsOptionsConfig } `toml:"vfs"` + Vfs struct{ VfsOptionsConfig } `toml:"vfs,omitempty"` // Zfs container options to be handed to ZFS drivers - Zfs struct{ ZfsOptionsConfig } `toml:"zfs"` + Zfs struct{ ZfsOptionsConfig } `toml:"zfs,omitempty"` // Do not create a bind mount on the storage home - SkipMountHome string `toml:"skip_mount_home"` + SkipMountHome string `toml:"skip_mount_home,omitempty"` // Alternative program to use for the mount of the file system - MountProgram string `toml:"mount_program"` + MountProgram string `toml:"mount_program,omitempty"` // MountOpt specifies extra mount options used when mounting - MountOpt string `toml:"mountopt"` + MountOpt string `toml:"mountopt,omitempty"` // PullOptions specifies options to be handed to pull managers // This API is experimental and can be changed without bumping the major version number. - PullOptions map[string]string `toml:"pull_options"` + PullOptions map[string]string `toml:"pull_options,omitempty"` // DisableVolatile doesn't allow volatile mounts when it is set. 
- DisableVolatile bool `toml:"disable-volatile"` + DisableVolatile bool `toml:"disable-volatile,omitempty"` } // GetGraphDriverOptions returns the driver specific options @@ -296,6 +298,9 @@ func GetGraphDriverOptions(driverName string, options OptionsConfig) []string { } else if options.Size != "" { doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size)) } + if options.Overlay.Inodes != "" { + doptions = append(doptions, fmt.Sprintf("%s.inodes=%s", driverName, options.Overlay.Inodes)) + } if options.Overlay.SkipMountHome != "" { doptions = append(doptions, fmt.Sprintf("%s.skip_mount_home=%s", driverName, options.Overlay.SkipMountHome)) } else if options.SkipMountHome != "" { diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go index 6a0ac2464..6e6852d4d 100644 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go @@ -1,3 +1,4 @@ +//go:build linux && cgo // +build linux,cgo package devicemapper @@ -805,7 +806,7 @@ func CreateSnapDevice(poolName string, deviceID int, baseName string, baseDevice if err := CreateSnapDeviceRaw(poolName, deviceID, baseDeviceID); err != nil { if doSuspend { if err2 := ResumeDevice(baseName); err2 != nil { - return fmt.Errorf("CreateSnapDeviceRaw Error: (%v): ResumeDevice Error: (%v)", err, err2) + return fmt.Errorf("CreateSnapDeviceRaw Error: (%v): ResumeDevice Error: %w", err, err2) } } return err diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go index 082fb1ba3..ead15c148 100644 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go @@ -1,3 +1,4 @@ +//go:build linux && cgo // +build linux,cgo package devicemapper diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go index 190d83d49..7baca8126 100644 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go @@ -1,3 +1,4 @@ +//go:build linux && cgo // +build linux,cgo package devicemapper diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go index 7f793c270..071f7f35b 100644 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go @@ -1,3 +1,4 @@ +//go:build linux && cgo && !libdm_no_deferred_remove // +build linux,cgo,!libdm_no_deferred_remove package devicemapper diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go index 7d8450898..93dcc3221 100644 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go @@ -1,3 +1,4 @@ +//go:build linux && cgo && !static_build // +build linux,cgo,!static_build package devicemapper diff --git 
a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go index a880fec8c..91906f2ef 100644 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go @@ -1,3 +1,4 @@ +//go:build linux && cgo && libdm_no_deferred_remove // +build linux,cgo,libdm_no_deferred_remove package devicemapper diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go index cf7f26a4c..68ea48fe5 100644 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go @@ -1,3 +1,4 @@ +//go:build linux && cgo && static_build // +build linux,cgo,static_build package devicemapper diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go b/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go index 50ea7c482..90ffe2c3f 100644 --- a/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go +++ b/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go @@ -1,3 +1,4 @@ +//go:build linux && cgo // +build linux,cgo package devicemapper diff --git a/vendor/github.com/containers/storage/pkg/directory/directory.go b/vendor/github.com/containers/storage/pkg/directory/directory.go index b0ce706e5..829fe59f3 100644 --- a/vendor/github.com/containers/storage/pkg/directory/directory.go +++ b/vendor/github.com/containers/storage/pkg/directory/directory.go @@ -1,7 +1,6 @@ package directory import ( - "io/ioutil" "os" "path/filepath" ) @@ -15,7 +14,7 @@ type DiskUsage struct { // MoveToSubdir moves all contents of a directory to a subdirectory underneath the original path func MoveToSubdir(oldpath, subdir string) error { - infos, err := ioutil.ReadDir(oldpath) + infos, err := os.ReadDir(oldpath) if err != nil { return err } diff --git a/vendor/github.com/containers/storage/pkg/directory/directory_unix.go b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go index 8d58d24ca..36e1bdd5f 100644 --- a/vendor/github.com/containers/storage/pkg/directory/directory_unix.go +++ b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go @@ -1,8 +1,10 @@ +//go:build linux || darwin || freebsd || solaris // +build linux darwin freebsd solaris package directory import ( + "io/fs" "os" "path/filepath" "syscall" @@ -21,7 +23,7 @@ func Size(dir string) (size int64, err error) { func Usage(dir string) (usage *DiskUsage, err error) { usage = &DiskUsage{} data := make(map[uint64]struct{}) - err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error { + err = filepath.WalkDir(dir, func(d string, entry fs.DirEntry, err error) error { if err != nil { // if dir does not exist, Usage() returns the error. // if dir/x disappeared while walking, Usage() ignores dir/x. @@ -31,8 +33,9 @@ func Usage(dir string) (usage *DiskUsage, err error) { return err } - if fileInfo == nil { - return nil + fileInfo, err := entry.Info() + if err != nil { + return err } // Check inode to only count the sizes of files with multiple hard links once. @@ -44,9 +47,8 @@ func Usage(dir string) (usage *DiskUsage, err error) { // inode is not a uint64 on all platforms. Cast it to avoid issues. 
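// Reviewer note, not part of the patch: the directory_unix.go hunk above is
// the migration from filepath.Walk to filepath.WalkDir, and the inode set it
// maintains is what keeps hard links from being double-counted. A standalone
// sketch of the same idea, assuming Unix and the syscall.Stat_t shape:

package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
	"syscall"
)

func main() {
	var size int64
	seen := make(map[uint64]struct{})
	err := filepath.WalkDir("/tmp", func(path string, d fs.DirEntry, err error) error {
		if err != nil || d.IsDir() {
			return err
		}
		info, err := d.Info() // WalkDir defers the stat until asked
		if err != nil {
			return err
		}
		if st, ok := info.Sys().(*syscall.Stat_t); ok {
			// Count each inode once so hard links don't inflate the total.
			if _, dup := seen[uint64(st.Ino)]; dup {
				return nil
			}
			seen[uint64(st.Ino)] = struct{}{}
		}
		size += info.Size()
		return nil
	})
	fmt.Println(size, err)
}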
data[uint64(inode)] = struct{}{} - // Ignore directory sizes - if fileInfo.IsDir() { + if entry.IsDir() { return nil } diff --git a/vendor/github.com/containers/storage/pkg/directory/directory_windows.go b/vendor/github.com/containers/storage/pkg/directory/directory_windows.go index a7a81240b..482bc51a2 100644 --- a/vendor/github.com/containers/storage/pkg/directory/directory_windows.go +++ b/vendor/github.com/containers/storage/pkg/directory/directory_windows.go @@ -1,8 +1,10 @@ +//go:build windows // +build windows package directory import ( + "io/fs" "os" "path/filepath" ) @@ -19,11 +21,11 @@ func Size(dir string) (size int64, err error) { // Usage walks a directory tree and returns its total size in bytes and the number of inodes. func Usage(dir string) (usage *DiskUsage, err error) { usage = &DiskUsage{} - err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error { + err = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error { if err != nil { // if dir does not exist, Size() returns the error. // if dir/x disappeared while walking, Size() ignores dir/x. - if os.IsNotExist(err) && d != dir { + if os.IsNotExist(err) && path != dir { return nil } return err @@ -32,16 +34,15 @@ func Usage(dir string) (usage *DiskUsage, err error) { usage.InodeCount++ // Ignore directory sizes - if fileInfo == nil { + if d.IsDir() { return nil } - s := fileInfo.Size() - if fileInfo.IsDir() || s == 0 { - return nil + fileInfo, err := d.Info() + if err != nil { + return err } - - usage.Size += s + usage.Size += fileInfo.Size() return nil }) diff --git a/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go b/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go index 7df7f3d43..e883d25f5 100644 --- a/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go +++ b/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package dmesg diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go index b3998fb35..e3cef48a3 100644 --- a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go @@ -1,6 +1,7 @@ package fileutils import ( + "errors" "fmt" "io" "os" @@ -9,7 +10,6 @@ import ( "strings" "text/scanner" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -38,7 +38,7 @@ func NewPatternMatcher(patterns []string) (*PatternMatcher, error) { return nil, errors.New("illegal exclusion pattern: \"!\"") } newp.exclusion = true - p = p[1:] + p = strings.TrimPrefix(filepath.Clean(p[1:]), "/") pm.exclusions = true } // Do some syntax checking on the pattern. 
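The one-line exclusion change in the fileutils hunk above is easy to miss: a leading "!" used to be stripped and nothing else, so "!/foo//bar" and "!foo/bar" were distinct patterns. After this patch the remainder is also path-cleaned and stripped of a leading slash. A small illustration of just that normalization step, isolated from the matcher:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	for _, p := range []string{"!/foo//bar", "!foo/bar/", "!./foo/bar"} {
		before := p[1:] // old behaviour: only the "!" was dropped
		// New behaviour: clean the pattern and drop any leading "/"
		// so exclusions compare against relative paths consistently.
		after := strings.TrimPrefix(filepath.Clean(p[1:]), "/")
		fmt.Printf("%q -> old %q, new %q\n", p, before, after)
	}
}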
@@ -321,14 +321,14 @@ func ReadSymlinkedDirectory(path string) (string, error) { var realPath string var err error if realPath, err = filepath.Abs(path); err != nil { - return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err) + return "", fmt.Errorf("unable to get absolute path for %s: %w", path, err) } if realPath, err = filepath.EvalSymlinks(realPath); err != nil { - return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err) + return "", fmt.Errorf("failed to canonicalise path for %s: %w", path, err) } realPathInfo, err := os.Stat(realPath) if err != nil { - return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err) + return "", fmt.Errorf("failed to stat target '%s' of '%s': %w", realPath, path, err) } if !realPathInfo.Mode().IsDir() { return "", fmt.Errorf("canonical path points to a file '%s'", realPath) @@ -340,13 +340,13 @@ func ReadSymlinkedDirectory(path string) (string, error) { // The target of the symbolic link can be a file and a directory. func ReadSymlinkedPath(path string) (realPath string, err error) { if realPath, err = filepath.Abs(path); err != nil { - return "", errors.Wrapf(err, "unable to get absolute path for %q", path) + return "", fmt.Errorf("unable to get absolute path for %q: %w", path, err) } if realPath, err = filepath.EvalSymlinks(realPath); err != nil { - return "", errors.Wrapf(err, "failed to canonicalise path for %q", path) + return "", fmt.Errorf("failed to canonicalise path for %q: %w", path, err) } if _, err := os.Stat(realPath); err != nil { - return "", errors.Wrapf(err, "failed to stat target %q of %q", realPath, path) + return "", fmt.Errorf("failed to stat target %q of %q: %w", realPath, path, err) } return realPath, nil } diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go index 9e0e97bd6..92e0263d8 100644 --- a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go @@ -1,10 +1,10 @@ +//go:build linux || freebsd // +build linux freebsd package fileutils import ( "fmt" - "io/ioutil" "os" "github.com/sirupsen/logrus" @@ -13,8 +13,8 @@ import ( // GetTotalUsedFds Returns the number of used File Descriptors by // reading it via /proc filesystem. 
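A recurring theme across this bump is the io/ioutil retirement: ioutil.ReadDir, ioutil.TempDir, ioutil.TempFile, and ioutil.Discard become os.ReadDir, os.MkdirTemp, os.CreateTemp, and io.Discard. The only behavioural wrinkle is that os.ReadDir returns []os.DirEntry (an alias for fs.DirEntry) rather than []os.FileInfo, so callers that need size or mode bits must call Info(). A quick sketch, using the same /proc/self/fd trick GetTotalUsedFds relies on (Linux-specific path assumed):

package main

import (
	"fmt"
	"os"
)

func main() {
	// os.ReadDir replaces ioutil.ReadDir; entries are DirEntry values,
	// which are cheaper because the stat is deferred until Info().
	entries, err := os.ReadDir("/proc/self/fd")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("open fds:", len(entries)) // what GetTotalUsedFds reports
	for _, e := range entries {
		if info, err := e.Info(); err == nil {
			fmt.Println(e.Name(), info.Mode())
		}
	}
}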
func GetTotalUsedFds() int { - if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { - logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) + if fds, err := os.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { + logrus.Errorf("%v", err) } else { return len(fds) } diff --git a/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go b/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go index e6094b55b..9854cac1c 100644 --- a/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go +++ b/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go @@ -1,10 +1,10 @@ +//go:build linux // +build linux package fsutils import ( "fmt" - "io/ioutil" "os" "unsafe" @@ -12,14 +12,14 @@ import ( ) func locateDummyIfEmpty(path string) (string, error) { - children, err := ioutil.ReadDir(path) + children, err := os.ReadDir(path) if err != nil { return "", err } if len(children) != 0 { return "", nil } - dummyFile, err := ioutil.TempFile(path, "fsutils-dummy") + dummyFile, err := os.CreateTemp(path, "fsutils-dummy") if err != nil { return "", err } diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir.go b/vendor/github.com/containers/storage/pkg/homedir/homedir.go new file mode 100644 index 000000000..85c5e76c8 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir.go @@ -0,0 +1,52 @@ +package homedir + +import ( + "errors" + "os" + "path/filepath" +) + +// GetConfigHome returns XDG_CONFIG_HOME. +// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetConfigHome() (string, error) { + if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" { + return xdgConfigHome, nil + } + home := Get() + if home == "" { + return "", errors.New("could not get either XDG_CONFIG_HOME or HOME") + } + return filepath.Join(home, ".config"), nil +} + +// GetDataHome returns XDG_DATA_HOME. +// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetDataHome() (string, error) { + if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" { + return xdgDataHome, nil + } + home := Get() + if home == "" { + return "", errors.New("could not get either XDG_DATA_HOME or HOME") + } + return filepath.Join(home, ".local", "share"), nil +} + +// GetCacheHome returns XDG_CACHE_HOME. +// GetCacheHome returns $HOME/.cache and nil error if XDG_CACHE_HOME is not set. 
+// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetCacheHome() (string, error) { + if xdgCacheHome := os.Getenv("XDG_CACHE_HOME"); xdgCacheHome != "" { + return xdgCacheHome, nil + } + home := Get() + if home == "" { + return "", errors.New("could not get either XDG_CACHE_HOME or HOME") + } + return filepath.Join(home, ".cache"), nil +} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go index 06b53854b..0883ee023 100644 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go @@ -1,3 +1,4 @@ +//go:build !linux && !darwin && !freebsd // +build !linux,!darwin,!freebsd package homedir @@ -18,18 +19,3 @@ func GetRuntimeDir() (string, error) { func StickRuntimeDirContents(files []string) ([]string, error) { return nil, errors.New("homedir.StickRuntimeDirContents() is not supported on this system") } - -// GetDataHome is unsupported on non-linux system. -func GetDataHome() (string, error) { - return "", errors.New("homedir.GetDataHome() is not supported on this system") -} - -// GetConfigHome is unsupported on non-linux system. -func GetConfigHome() (string, error) { - return "", errors.New("homedir.GetConfigHome() is not supported on this system") -} - -// GetCacheHome is unsupported on non-linux system. -func GetCacheHome() (string, error) { - return "", errors.New("homedir.GetCacheHome() is not supported on this system") -} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go index 2475e351b..9976f19af 100644 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package homedir @@ -46,7 +47,7 @@ func GetShortcutString() string { // See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html func GetRuntimeDir() (string, error) { if xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR"); xdgRuntimeDir != "" { - return xdgRuntimeDir, nil + return filepath.EvalSymlinks(xdgRuntimeDir) } return "", errors.New("could not get XDG_RUNTIME_DIR") } @@ -62,7 +63,7 @@ func StickRuntimeDirContents(files []string) ([]string, error) { runtimeDir, err := GetRuntimeDir() if err != nil { // ignore error if runtimeDir is empty - return nil, nil + return nil, nil //nolint: nilerr } runtimeDir, err = filepath.Abs(runtimeDir) if err != nil { @@ -93,48 +94,3 @@ func stick(f string) error { m |= os.ModeSticky return os.Chmod(f, m) } - -// GetDataHome returns XDG_DATA_HOME. -// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetDataHome() (string, error) { - if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" { - return xdgDataHome, nil - } - home := Get() - if home == "" { - return "", errors.New("could not get either XDG_DATA_HOME or HOME") - } - return filepath.Join(home, ".local", "share"), nil -} - -// GetConfigHome returns XDG_CONFIG_HOME. -// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set. 
-// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetConfigHome() (string, error) { - if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" { - return xdgConfigHome, nil - } - home := Get() - if home == "" { - return "", errors.New("could not get either XDG_CONFIG_HOME or HOME") - } - return filepath.Join(home, ".config"), nil -} - -// GetCacheHome returns XDG_CACHE_HOME. -// GetCacheHome returns $HOME/.cache and nil error if XDG_CACHE_HOME is not set. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetCacheHome() (string, error) { - if xdgCacheHome := os.Getenv("XDG_CACHE_HOME"); xdgCacheHome != "" { - return xdgCacheHome, nil - } - home := Get() - if home == "" { - return "", errors.New("could not get either XDG_CACHE_HOME or HOME") - } - return filepath.Join(home, ".cache"), nil -} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go index 4f2615ed3..af65f2c03 100644 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go @@ -17,7 +17,12 @@ func Key() string { // environment variables depending on the target operating system. // Returned path should be used with "path/filepath" to form new paths. func Get() string { - return os.Getenv(Key()) + home := os.Getenv(Key()) + if home != "" { + return home + } + home, _ = os.UserHomeDir() + return home } // GetShortcutString returns the string that is shortcut to user's home directory diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools.go b/vendor/github.com/containers/storage/pkg/idtools/idtools.go index 34345d145..4cec8b263 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools.go @@ -2,16 +2,19 @@ package idtools import ( "bufio" + "errors" "fmt" "os" "os/user" + "runtime" "sort" "strconv" "strings" + "sync" "syscall" "github.com/containers/storage/pkg/system" - "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) // IDMap contains a single entry for user namespace range remapping. An array @@ -35,8 +38,9 @@ func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start } const ( - subuidFileName string = "/etc/subuid" - subgidFileName string = "/etc/subgid" + subuidFileName string = "/etc/subuid" + subgidFileName string = "/etc/subgid" + ContainersOverrideXattr = "user.containers.override_stat" ) // MkdirAllAs creates a directory (include any along the path) and then modifies @@ -82,7 +86,7 @@ func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { if len(uidMap) == 1 && uidMap[0].Size == 1 { uid = uidMap[0].HostID } else { - uid, err = toHost(0, uidMap) + uid, err = RawToHost(0, uidMap) if err != nil { return -1, -1, err } @@ -90,7 +94,7 @@ func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { if len(gidMap) == 1 && gidMap[0].Size == 1 { gid = gidMap[0].HostID } else { - gid, err = toHost(0, gidMap) + gid, err = RawToHost(0, gidMap) if err != nil { return -1, -1, err } @@ -98,10 +102,14 @@ func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { return uid, gid, nil } -// toContainer takes an id mapping, and uses it to translate a -// host ID to the remapped ID. 
If no map is provided, then the translation -// assumes a 1-to-1 mapping and returns the passed in id -func toContainer(hostID int, idMap []IDMap) (int, error) { +// RawToContainer takes an id mapping, and uses it to translate a host ID to +// the remapped ID. If no map is provided, then the translation assumes a +// 1-to-1 mapping and returns the passed in id. +// +// If you wish to map a (uid,gid) combination you should use the corresponding +// IDMappings methods, which ensure that you are mapping the correct ID against +// the correct mapping. +func RawToContainer(hostID int, idMap []IDMap) (int, error) { if idMap == nil { return hostID, nil } @@ -111,13 +119,17 @@ func toContainer(hostID int, idMap []IDMap) (int, error) { return contID, nil } } - return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID) + return -1, fmt.Errorf("host ID %d cannot be mapped to a container ID", hostID) } -// toHost takes an id mapping and a remapped ID, and translates the -// ID to the mapped host ID. If no map is provided, then the translation -// assumes a 1-to-1 mapping and returns the passed in id # -func toHost(contID int, idMap []IDMap) (int, error) { +// RawToHost takes an id mapping and a remapped ID, and translates the ID to +// the mapped host ID. If no map is provided, then the translation assumes a +// 1-to-1 mapping and returns the passed in id. +// +// If you wish to map a (uid,gid) combination you should use the corresponding +// IDMappings methods, which ensure that you are mapping the correct ID against +// the correct mapping. +func RawToHost(contID int, idMap []IDMap) (int, error) { if idMap == nil { return contID, nil } @@ -127,7 +139,7 @@ func toHost(contID int, idMap []IDMap) (int, error) { return hostID, nil } } - return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID) + return -1, fmt.Errorf("container ID %d cannot be mapped to a host ID", contID) } // IDPair is a UID and GID pair @@ -146,19 +158,19 @@ type IDMappings struct { // using the data from /etc/sub{uid,gid} ranges, creates the // proper uid and gid remapping ranges for that user/group pair func NewIDMappings(username, groupname string) (*IDMappings, error) { - subuidRanges, err := parseSubuid(username) + subuidRanges, err := readSubuid(username) if err != nil { return nil, err } - subgidRanges, err := parseSubgid(groupname) + subgidRanges, err := readSubgid(groupname) if err != nil { return nil, err } if len(subuidRanges) == 0 { - return nil, fmt.Errorf("No subuid ranges found for user %q in %s", username, subuidFileName) + return nil, fmt.Errorf("no subuid ranges found for user %q in %s", username, subuidFileName) } if len(subgidRanges) == 0 { - return nil, fmt.Errorf("No subgid ranges found for group %q in %s", groupname, subgidFileName) + return nil, fmt.Errorf("no subgid ranges found for group %q in %s", groupname, subgidFileName) } return &IDMappings{ @@ -182,31 +194,87 @@ func (i *IDMappings) RootPair() IDPair { } // ToHost returns the host UID and GID for the container uid, gid. 
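The rename from the unexported toHost/toContainer to RawToHost/RawToContainer makes this range arithmetic part of the public API, so it is worth seeing what that arithmetic is: a linear scan for the range containing the ID, then an offset into the corresponding host (or container) range. A sketch of essentially that scan, with a made-up mapping and local types rather than the package's own:

package main

import "fmt"

// IDMap mirrors the shape used in this package: a container-side start,
// a host-side start, and the length of the contiguous range.
type IDMap struct {
	ContainerID, HostID, Size int
}

// rawToHost finds the range containing contID and translates by offset;
// a nil map means the identity mapping.
func rawToHost(contID int, idMap []IDMap) (int, error) {
	if idMap == nil {
		return contID, nil
	}
	for _, m := range idMap {
		if contID >= m.ContainerID && contID <= m.ContainerID+m.Size-1 {
			return m.HostID + (contID - m.ContainerID), nil
		}
	}
	return -1, fmt.Errorf("container ID %d cannot be mapped to a host ID", contID)
}

func main() {
	mapping := []IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
	fmt.Println(rawToHost(0, mapping))     // 100000 <nil>
	fmt.Println(rawToHost(1000, mapping))  // 101000 <nil>
	fmt.Println(rawToHost(70000, mapping)) // -1 and an error
}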
-// Remapping is only performed if the ids aren't already the remapped root ids func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) { + var err error + var target IDPair + + target.UID, err = RawToHost(pair.UID, i.uids) + if err != nil { + return target, err + } + + target.GID, err = RawToHost(pair.GID, i.gids) + return target, err +} + +var ( + overflowUIDOnce sync.Once + overflowGIDOnce sync.Once + overflowUID int + overflowGID int +) + +// getOverflowUID returns the UID mapped to the overflow user +func getOverflowUID() int { + overflowUIDOnce.Do(func() { + // 65534 is the value on older kernels where /proc/sys/kernel/overflowuid is not present + overflowUID = 65534 + if content, err := os.ReadFile("/proc/sys/kernel/overflowuid"); err == nil { + if tmp, err := strconv.Atoi(string(content)); err == nil { + overflowUID = tmp + } + } + }) + return overflowUID +} + +// getOverflowUID returns the GID mapped to the overflow user +func getOverflowGID() int { + overflowGIDOnce.Do(func() { + // 65534 is the value on older kernels where /proc/sys/kernel/overflowgid is not present + overflowGID = 65534 + if content, err := os.ReadFile("/proc/sys/kernel/overflowgid"); err == nil { + if tmp, err := strconv.Atoi(string(content)); err == nil { + overflowGID = tmp + } + } + }) + return overflowGID +} + +// ToHost returns the host UID and GID for the container uid, gid. +// Remapping is only performed if the ids aren't already the remapped root ids +// If the mapping is not possible because the target ID is not mapped into +// the namespace, then the overflow ID is used. +func (i *IDMappings) ToHostOverflow(pair IDPair) (IDPair, error) { var err error target := i.RootPair() if pair.UID != target.UID { - target.UID, err = toHost(pair.UID, i.uids) + target.UID, err = RawToHost(pair.UID, i.uids) if err != nil { - return target, err + target.UID = getOverflowUID() + logrus.Debugf("Failed to map UID %v to the target mapping, using the overflow ID %v", pair.UID, target.UID) } } if pair.GID != target.GID { - target.GID, err = toHost(pair.GID, i.gids) + target.GID, err = RawToHost(pair.GID, i.gids) + if err != nil { + target.GID = getOverflowGID() + logrus.Debugf("Failed to map GID %v to the target mapping, using the overflow ID %v", pair.GID, target.GID) + } } - return target, err + return target, nil } // ToContainer returns the container UID and GID for the host uid and gid func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) { - uid, err := toContainer(pair.UID, i.uids) + uid, err := RawToContainer(pair.UID, i.uids) if err != nil { return -1, -1, err } - gid, err := toContainer(pair.GID, i.gids) + gid, err := RawToContainer(pair.GID, i.gids) return uid, gid, err } @@ -244,14 +312,6 @@ func createIDMap(subidRanges ranges) []IDMap { return idMap } -func parseSubuid(username string) (ranges, error) { - return parseSubidFile(subuidFileName, username) -} - -func parseSubgid(username string) (ranges, error) { - return parseSubidFile(subgidFileName, username) -} - // parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid) // and return all found ranges for a specified username. 
If the special value // "ALL" is supplied for username, then all ranges in the file will be returned @@ -282,16 +342,16 @@ func parseSubidFile(path, username string) (ranges, error) { } parts := strings.Split(text, ":") if len(parts) != 3 { - return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path) + return rangeList, fmt.Errorf("cannot parse subuid/gid information: Format not correct for %s file", path) } if parts[0] == username || username == "ALL" || (parts[0] == uidstr && parts[0] != "") { startid, err := strconv.Atoi(parts[1]) if err != nil { - return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + return rangeList, fmt.Errorf("string to int conversion failed during subuid/gid parsing of %s: %w", path, err) } length, err := strconv.Atoi(parts[2]) if err != nil { - return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + return rangeList, fmt.Errorf("string to int conversion failed during subuid/gid parsing of %s: %w", path, err) } rangeList = append(rangeList, subIDRange{startid, length}) } @@ -300,13 +360,33 @@ func parseSubidFile(path, username string) (ranges, error) { } func checkChownErr(err error, name string, uid, gid int) error { - if e, ok := err.(*os.PathError); ok && e.Err == syscall.EINVAL { - return errors.Wrapf(err, "potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid", uid, gid, name) + var e *os.PathError + if errors.As(err, &e) && e.Err == syscall.EINVAL { + return fmt.Errorf("potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally and run podman-system-migrate: %w", uid, gid, name, err) } return err } func SafeChown(name string, uid, gid int) error { + if runtime.GOOS == "darwin" { + var mode uint64 = 0o0700 + xstat, err := system.Lgetxattr(name, ContainersOverrideXattr) + if err == nil { + attrs := strings.Split(string(xstat), ":") + if len(attrs) == 3 { + val, err := strconv.ParseUint(attrs[2], 8, 32) + if err == nil { + mode = val + } + } + } + value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode) + if err = system.Lsetxattr(name, ContainersOverrideXattr, []byte(value), 0); err != nil { + return err + } + uid = os.Getuid() + gid = os.Getgid() + } if stat, statErr := system.Stat(name); statErr == nil { if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) { return nil @@ -316,6 +396,25 @@ func SafeChown(name string, uid, gid int) error { } func SafeLchown(name string, uid, gid int) error { + if runtime.GOOS == "darwin" { + var mode uint64 = 0o0700 + xstat, err := system.Lgetxattr(name, ContainersOverrideXattr) + if err == nil { + attrs := strings.Split(string(xstat), ":") + if len(attrs) == 3 { + val, err := strconv.ParseUint(attrs[2], 8, 32) + if err == nil { + mode = val + } + } + } + value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode) + if err = system.Lsetxattr(name, ContainersOverrideXattr, []byte(value), 0); err != nil { + return err + } + uid = os.Getuid() + gid = os.Getgid() + } if stat, statErr := system.Lstat(name); statErr == nil { if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) { return nil diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go new file mode 100644 index 000000000..03e787376 --- /dev/null +++ 
b/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go @@ -0,0 +1,87 @@ +//go:build linux && cgo && libsubid +// +build linux,cgo,libsubid + +package idtools + +import ( + "errors" + "os/user" + "unsafe" +) + +/* +#cgo LDFLAGS: -l subid +#include +#include +#include +const char *Prog = "storage"; +FILE *shadow_logfd = NULL; + +struct subid_range get_range(struct subid_range *ranges, int i) +{ + shadow_logfd = stderr; + return ranges[i]; +} + +#if !defined(SUBID_ABI_MAJOR) || (SUBID_ABI_MAJOR < 4) +# define subid_get_uid_ranges get_subuid_ranges +# define subid_get_gid_ranges get_subgid_ranges +#endif + +*/ +import "C" + +func readSubid(username string, isUser bool) (ranges, error) { + var ret ranges + uidstr := "" + + if username == "ALL" { + return nil, errors.New("username ALL not supported") + } + + if u, err := user.Lookup(username); err == nil { + uidstr = u.Uid + } + + cUsername := C.CString(username) + defer C.free(unsafe.Pointer(cUsername)) + + cuidstr := C.CString(uidstr) + defer C.free(unsafe.Pointer(cuidstr)) + + var nRanges C.int + var cRanges *C.struct_subid_range + if isUser { + nRanges = C.subid_get_uid_ranges(cUsername, &cRanges) + if nRanges <= 0 { + nRanges = C.subid_get_uid_ranges(cuidstr, &cRanges) + } + } else { + nRanges = C.subid_get_gid_ranges(cUsername, &cRanges) + if nRanges <= 0 { + nRanges = C.subid_get_gid_ranges(cuidstr, &cRanges) + } + } + if nRanges < 0 { + return nil, errors.New("cannot read subids") + } + defer C.free(unsafe.Pointer(cRanges)) + + for i := 0; i < int(nRanges); i++ { + r := C.get_range(cRanges, C.int(i)) + newRange := subIDRange{ + Start: int(r.start), + Length: int(r.count), + } + ret = append(ret, newRange) + } + return ret, nil +} + +func readSubuid(username string) (ranges, error) { + return readSubid(username, true) +} + +func readSubgid(username string) (ranges, error) { + return readSubid(username, false) +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go index 9776b2a12..daff1e4a9 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package idtools @@ -46,6 +47,9 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown // walk back to "/" looking for directories which do not exist // and add them to the paths array for chown after creation dirPath := path + if !filepath.IsAbs(dirPath) { + return fmt.Errorf("path: %s should be absolute", dirPath) + } for { dirPath = filepath.Dir(dirPath) if dirPath == "/" { diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unsupported.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unsupported.go new file mode 100644 index 000000000..78141fb85 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unsupported.go @@ -0,0 +1,12 @@ +//go:build !linux || !libsubid || !cgo +// +build !linux !libsubid !cgo + +package idtools + +func readSubuid(username string) (ranges, error) { + return parseSubidFile(subuidFileName, username) +} + +func readSubgid(username string) (ranges, error) { + return parseSubidFile(subgidFileName, username) +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go index 16be94f44..dc69c6076 100644 --- 
a/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package idtools diff --git a/vendor/github.com/containers/storage/pkg/idtools/parser.go b/vendor/github.com/containers/storage/pkg/idtools/parser.go index 1c819a1f9..042d0ea95 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/parser.go +++ b/vendor/github.com/containers/storage/pkg/idtools/parser.go @@ -11,22 +11,22 @@ import ( func parseTriple(spec []string) (container, host, size uint32, err error) { cid, err := strconv.ParseUint(spec[0], 10, 32) if err != nil { - return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[0], err) + return 0, 0, 0, fmt.Errorf("parsing id map value %q: %w", spec[0], err) } hid, err := strconv.ParseUint(spec[1], 10, 32) if err != nil { - return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[1], err) + return 0, 0, 0, fmt.Errorf("parsing id map value %q: %w", spec[1], err) } sz, err := strconv.ParseUint(spec[2], 10, 32) if err != nil { - return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[2], err) + return 0, 0, 0, fmt.Errorf("parsing id map value %q: %w", spec[2], err) } return uint32(cid), uint32(hid), uint32(sz), nil } // ParseIDMap parses idmap triples from string. func ParseIDMap(mapSpec []string, mapSetting string) (idmap []IDMap, err error) { - stdErr := fmt.Errorf("error initializing ID mappings: %s setting is malformed expected [\"uint32:uint32:uint32\"]: %q", mapSetting, mapSpec) + stdErr := fmt.Errorf("initializing ID mappings: %s setting is malformed expected [\"uint32:uint32:uint32\"]: %q", mapSetting, mapSpec) for _, idMapSpec := range mapSpec { if idMapSpec == "" { continue diff --git a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go index 9da7975e2..40e507f77 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go +++ b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go @@ -2,11 +2,12 @@ package idtools import ( "fmt" - "regexp" "sort" "strconv" "strings" "sync" + + "github.com/containers/storage/pkg/regexp" ) // add a user and/or group to Linux /etc/passwd, /etc/group using standard @@ -24,7 +25,7 @@ var ( "usermod": "-%s %d-%d %s", } - idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`) + idOutRegexp = regexp.Delayed(`uid=([0-9]+).*gid=([0-9]+)`) // default length for a UID/GID subordinate range defaultRangeLen = 65536 defaultRangeStart = 100000 @@ -37,32 +38,32 @@ var ( // mapping ranges in containers. 
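The parser.go hunk above only reworks error wrapping, but the format it parses is worth spelling out for anyone auditing the remap-uids/remap-gids options: each mapping is a colon-separated uint32 triple, container-ID:host-ID:size. A rough stand-in for parseTriple on a single spec, with the same 32-bit bounds (hypothetical helper name):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseSpec parses one "container:host:size" mapping spec.
func parseSpec(spec string) (uint32, uint32, uint32, error) {
	parts := strings.Split(spec, ":")
	if len(parts) != 3 {
		return 0, 0, 0, fmt.Errorf("malformed id map %q: expected uint32:uint32:uint32", spec)
	}
	var vals [3]uint32
	for i, p := range parts {
		v, err := strconv.ParseUint(p, 10, 32) // base 10, must fit in 32 bits
		if err != nil {
			return 0, 0, 0, fmt.Errorf("parsing id map value %q: %w", p, err)
		}
		vals[i] = uint32(v)
	}
	return vals[0], vals[1], vals[2], nil
}

func main() {
	fmt.Println(parseSpec("0:100000:65536")) // 0 100000 65536 <nil>
}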
func AddNamespaceRangesUser(name string) (int, int, error) { if err := addUser(name); err != nil { - return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err) + return -1, -1, fmt.Errorf("adding user %q: %w", name, err) } // Query the system for the created uid and gid pair out, err := execCmd("id", name) if err != nil { - return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err) + return -1, -1, fmt.Errorf("trying to find uid/gid for new user %q: %w", name, err) } matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out))) if len(matches) != 3 { - return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out)) + return -1, -1, fmt.Errorf("can't find uid, gid from `id` output: %q", string(out)) } uid, err := strconv.Atoi(matches[1]) if err != nil { - return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err) + return -1, -1, fmt.Errorf("can't convert found uid (%s) to int: %w", matches[1], err) } gid, err := strconv.Atoi(matches[2]) if err != nil { - return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err) + return -1, -1, fmt.Errorf("can't convert found gid (%s) to int: %w", matches[2], err) } // Now we need to create the subuid/subgid ranges for our new user/group (system users // do not get auto-created ranges in subuid/subgid) if err := createSubordinateRanges(name); err != nil { - return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err) + return -1, -1, fmt.Errorf("couldn't create subordinate ID ranges: %w", err) } return uid, gid, nil } @@ -77,12 +78,12 @@ func addUser(userName string) error { } }) if userCommand == "" { - return fmt.Errorf("Cannot add user; no useradd/adduser binary found") + return fmt.Errorf("cannot add user; no useradd/adduser binary found") } args := fmt.Sprintf(cmdTemplates[userCommand], userName) out, err := execCmd(userCommand, args) if err != nil { - return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out)) + return fmt.Errorf("failed to add user with error: %w; output: %q", err, string(out)) } return nil } @@ -91,53 +92,53 @@ func createSubordinateRanges(name string) error { // first, we should verify that ranges weren't automatically created // by the distro tooling - ranges, err := parseSubuid(name) + ranges, err := readSubuid(name) if err != nil { - return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err) + return fmt.Errorf("while looking for subuid ranges for user %q: %w", name, err) } if len(ranges) == 0 { // no UID ranges; let's create one startID, err := findNextUIDRange() if err != nil { - return fmt.Errorf("Can't find available subuid range: %v", err) + return fmt.Errorf("can't find available subuid range: %w", err) } out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name)) if err != nil { - return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err) + return fmt.Errorf("unable to add subuid range to user: %q; output: %s, err: %w", name, out, err) } } - ranges, err = parseSubgid(name) + ranges, err = readSubgid(name) if err != nil { - return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err) + return fmt.Errorf("while looking for subgid ranges for user %q: %w", name, err) } if len(ranges) == 0 { // no GID ranges; let's create one startID, err := findNextGIDRange() if err != nil { - return fmt.Errorf("Can't find available subgid 
range: %v", err) + return fmt.Errorf("can't find available subgid range: %w", err) } out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name)) if err != nil { - return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err) + return fmt.Errorf("unable to add subgid range to user: %q; output: %s, err: %w", name, out, err) } } return nil } func findNextUIDRange() (int, error) { - ranges, err := parseSubuid("ALL") + ranges, err := readSubuid("ALL") if err != nil { - return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err) + return -1, fmt.Errorf("couldn't parse all ranges in /etc/subuid file: %w", err) } sort.Sort(ranges) return findNextRangeStart(ranges) } func findNextGIDRange() (int, error) { - ranges, err := parseSubgid("ALL") + ranges, err := readSubgid("ALL") if err != nil { - return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err) + return -1, fmt.Errorf("couldn't parse all ranges in /etc/subgid file: %w", err) } sort.Sort(ranges) return findNextRangeStart(ranges) diff --git a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go index d98b354cb..15bd98ede 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package idtools @@ -8,5 +9,5 @@ import "fmt" // and calls the appropriate helper function to add the group and then // the user to the group in /etc/group and /etc/passwd respectively. func AddNamespaceRangesUser(name string) (int, int, error) { - return -1, -1, fmt.Errorf("No support for adding users or groups on this OS") + return -1, -1, fmt.Errorf("no support for adding users or groups on this OS") } diff --git a/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go b/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go index 9703ecbd9..33a7dee6c 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go +++ b/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package idtools @@ -23,7 +24,7 @@ func resolveBinary(binname string) (string, error) { if filepath.Base(resolvedPath) == binname { return resolvedPath, nil } - return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) + return "", fmt.Errorf("binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) } func execCmd(cmd, args string) ([]byte, error) { diff --git a/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go b/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go index cd12470f9..231d1c47b 100644 --- a/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go +++ b/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go @@ -2,9 +2,9 @@ package ioutils import ( "io" - "io/ioutil" "os" "path/filepath" + "time" ) // AtomicFileWriterOptions specifies options for creating the atomic file writer. @@ -14,9 +14,12 @@ type AtomicFileWriterOptions struct { // storage after it has been written and before it is moved to // the specified path. NoSync bool + // On successful return from Close() this is set to the mtime of the + // newly written file. 
+ ModTime time.Time } -var defaultWriterOptions AtomicFileWriterOptions = AtomicFileWriterOptions{} +var defaultWriterOptions = AtomicFileWriterOptions{} // SetDefaultOptions overrides the default options used when creating an // atomic file writer. @@ -28,7 +31,14 @@ func SetDefaultOptions(opts AtomicFileWriterOptions) { // temporary file and closing it atomically changes the temporary file to // destination path. Writing and closing concurrently is not allowed. func NewAtomicFileWriterWithOpts(filename string, perm os.FileMode, opts *AtomicFileWriterOptions) (io.WriteCloser, error) { - f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) + return newAtomicFileWriter(filename, perm, opts) +} + +// newAtomicFileWriter returns WriteCloser so that writing to it writes to a +// temporary file and closing it atomically changes the temporary file to +// destination path. Writing and closing concurrently is not allowed. +func newAtomicFileWriter(filename string, perm os.FileMode, opts *AtomicFileWriterOptions) (*atomicFileWriter, error) { + f, err := os.CreateTemp(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) if err != nil { return nil, err } @@ -55,28 +65,38 @@ func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, err } // AtomicWriteFile atomically writes data to a file named by filename. -func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error { - f, err := NewAtomicFileWriter(filename, perm) +func AtomicWriteFileWithOpts(filename string, data []byte, perm os.FileMode, opts *AtomicFileWriterOptions) error { + f, err := newAtomicFileWriter(filename, perm, opts) if err != nil { return err } n, err := f.Write(data) if err == nil && n < len(data) { err = io.ErrShortWrite - f.(*atomicFileWriter).writeErr = err + f.writeErr = err } if err1 := f.Close(); err == nil { err = err1 } + + if opts != nil { + opts.ModTime = f.modTime + } + return err } +func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error { + return AtomicWriteFileWithOpts(filename, data, perm, nil) +} + type atomicFileWriter struct { f *os.File fn string writeErr error perm os.FileMode noSync bool + modTime time.Time } func (w *atomicFileWriter) Write(dt []byte) (int, error) { @@ -99,9 +119,25 @@ func (w *atomicFileWriter) Close() (retErr error) { return err } } + + // fstat before closing the fd + info, statErr := w.f.Stat() + if statErr == nil { + w.modTime = info.ModTime() + } + // We delay error reporting until after the real call to close() + // to match the traditional linux close() behaviour that an fd + // is invalid (closed) even if close returns failure. While + // weird, this allows a well defined way to not leak open fds. + if err := w.f.Close(); err != nil { return err } + + if statErr != nil { + return statErr + } + if err := os.Chmod(w.f.Name(), w.perm); err != nil { return err } @@ -124,7 +160,7 @@ type AtomicWriteSet struct { // commit. If no temporary directory is given the system // default is used. 
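Aside: the reworked writer above also gains AtomicWriteFileWithOpts, which reports the newly written file's mtime back through the options struct. An illustrative caller, not part of the patch; the file path and payload are assumptions:

package main

import (
	"fmt"
	"log"

	"github.com/containers/storage/pkg/ioutils"
)

func main() {
	opts := ioutils.AtomicFileWriterOptions{}
	data := []byte(`{"status":"ok"}`)
	// Writes to a temp file in the same directory, then renames it into
	// place; on success the new file's mtime is recorded in opts.ModTime.
	if err := ioutils.AtomicWriteFileWithOpts("/tmp/state.json", data, 0o600, &opts); err != nil {
		log.Fatal(err)
	}
	fmt.Println("state.json written at:", opts.ModTime)
}

Callers can cache opts.ModTime and later compare it against os.Stat to detect external modifications.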
func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) { - td, err := ioutil.TempDir(tmpDir, "write-set-") + td, err := os.MkdirTemp(tmpDir, "write-set-") if err != nil { return nil, err } diff --git a/vendor/github.com/containers/storage/pkg/ioutils/fswriters_unsupported.go b/vendor/github.com/containers/storage/pkg/ioutils/fswriters_unsupported.go index 79a094035..635489280 100644 --- a/vendor/github.com/containers/storage/pkg/ioutils/fswriters_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/ioutils/fswriters_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package ioutils diff --git a/vendor/github.com/containers/storage/pkg/ioutils/readers.go b/vendor/github.com/containers/storage/pkg/ioutils/readers.go index 63f3c07f4..0e89787d4 100644 --- a/vendor/github.com/containers/storage/pkg/ioutils/readers.go +++ b/vendor/github.com/containers/storage/pkg/ioutils/readers.go @@ -17,8 +17,25 @@ func (r *readCloserWrapper) Close() error { return r.closer() } +type readWriteToCloserWrapper struct { + io.Reader + io.WriterTo + closer func() error +} + +func (r *readWriteToCloserWrapper) Close() error { + return r.closer() +} + // NewReadCloserWrapper returns a new io.ReadCloser. func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { + if wt, ok := r.(io.WriterTo); ok { + return &readWriteToCloserWrapper{ + Reader: r, + WriterTo: wt, + closer: closer, + } + } return &readCloserWrapper{ Reader: r, closer: closer, diff --git a/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go b/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go index 1539ad21b..9d5af610e 100644 --- a/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go +++ b/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go @@ -1,10 +1,11 @@ +//go:build !windows // +build !windows package ioutils -import "io/ioutil" +import "os" -// TempDir on Unix systems is equivalent to ioutil.TempDir. +// TempDir on Unix systems is equivalent to os.MkdirTemp. func TempDir(dir, prefix string) (string, error) { - return ioutil.TempDir(dir, prefix) + return os.MkdirTemp(dir, prefix) } diff --git a/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go b/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go index c719c120b..2c2242d69 100644 --- a/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go +++ b/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go @@ -1,16 +1,17 @@ +//go:build windows // +build windows package ioutils import ( - "io/ioutil" + "os" "github.com/containers/storage/pkg/longpath" ) -// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format. +// TempDir is the equivalent of os.MkdirTemp, except that the result is in Windows longpath format. 
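Aside: the NewReadCloserWrapper change above preserves the io.WriterTo implementation of the wrapped reader, so io.Copy can keep its buffer-free fast path. A hedged sketch of why that matters, not part of the patch:

package main

import (
	"bytes"
	"io"
	"log"
	"os"

	"github.com/containers/storage/pkg/ioutils"
)

func main() {
	src := bytes.NewReader([]byte("hello\n"))
	// *bytes.Reader implements io.WriterTo; the wrapper now preserves it,
	// so io.Copy below delegates to WriteTo instead of looping over Read
	// with an intermediate buffer.
	rc := ioutils.NewReadCloserWrapper(src, func() error { return nil })
	defer rc.Close()
	if _, err := io.Copy(os.Stdout, rc); err != nil {
		log.Fatal(err)
	}
}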
func TempDir(dir, prefix string) (string, error) { - tempDir, err := ioutil.TempDir(dir, prefix) + tempDir, err := os.MkdirTemp(dir, prefix) if err != nil { return "", err } diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go index 6a00141c3..ec25f8a9c 100644 --- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go +++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go @@ -1,16 +1,17 @@ package lockfile import ( + "fmt" "path/filepath" "sync" "time" - - "github.com/pkg/errors" ) // A Locker represents a file lock where the file is used to cache an // identifier of the last party that made changes to whatever's being protected // by the lock. +// +// Deprecated: Refer directly to *LockFile, the provided implementation, instead. type Locker interface { // Acquire a writer lock. // The default unix implementation panics if: @@ -18,10 +19,6 @@ type Locker interface { // - tried to lock a read-only lock-file Lock() - // Acquire a writer lock recursively, allowing for recursive acquisitions - // within the same process space. - RecursiveLock() - // Unlock the lock. // The default unix implementation panics if: // - unlocking an unlocked lock @@ -33,10 +30,13 @@ type Locker interface { // Touch records, for others sharing the lock, that the caller was the // last writer. It should only be called with the lock held. + // + // Deprecated: Use *LockFile.RecordWrite. Touch() error // Modified() checks if the most recent writer was a party other than the // last recorded writer. It should only be called with the lock held. + // Deprecated: Use *LockFile.ModifiedSince. Modified() (bool, error) // TouchedSince() checks if the most recent writer modified the file (likely using Touch()) after the specified time. @@ -45,63 +45,86 @@ type Locker interface { // IsReadWrite() checks if the lock file is read-write IsReadWrite() bool - // Locked() checks if lock is locked for writing by a thread in this process - Locked() bool + // AssertLocked() can be used by callers that _know_ that they hold the lock (for reading or writing), for sanity checking. + // It might do nothing at all, or it may panic if the caller is not the owner of this lock. + AssertLocked() + + // AssertLockedForWriting() can be used by callers that _know_ that they hold the lock locked for writing, for sanity checking. + // It might do nothing at all, or it may panic if the caller is not the owner of this lock for writing. + AssertLockedForWriting() } var ( - lockfiles map[string]Locker - lockfilesLock sync.Mutex + lockFiles map[string]*LockFile + lockFilesLock sync.Mutex ) +// GetLockFile opens a read-write lock file, creating it if necessary. The +// *LockFile object may already be locked if the path has already been requested +// by the current process. +func GetLockFile(path string) (*LockFile, error) { + return getLockfile(path, false) +} + // GetLockfile opens a read-write lock file, creating it if necessary. The // Locker object may already be locked if the path has already been requested // by the current process. +// +// Deprecated: Use GetLockFile func GetLockfile(path string) (Locker, error) { - return getLockfile(path, false) + return GetLockFile(path) +} + +// GetROLockFile opens a read-only lock file, creating it if necessary. The +// *LockFile object may already be locked if the path has already been requested +// by the current process. 
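Aside: with the Locker interface deprecated in favor of the concrete *LockFile, a typical write-path consumer now looks like the sketch below (illustrative, not part of the patch; the lock path is an assumption, and RecordWrite is defined later in this file):

package main

import (
	"log"

	"github.com/containers/storage/pkg/lockfile"
)

func main() {
	// GetLockFile replaces the deprecated GetLockfile and returns the
	// concrete *LockFile instead of the Locker interface.
	lock, err := lockfile.GetLockFile("/tmp/demo.lock")
	if err != nil {
		log.Fatal(err)
	}
	lock.Lock() // exclusive; panics if called on a read-only lock file
	defer lock.Unlock()

	// ... mutate the data protected by the lock ...

	// Record, for other processes sharing the lock, that we wrote something.
	if _, err := lock.RecordWrite(); err != nil {
		log.Fatal(err)
	}
}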
+func GetROLockFile(path string) (*LockFile, error) { + return getLockfile(path, true) } // GetROLockfile opens a read-only lock file, creating it if necessary. The // Locker object may already be locked if the path has already been requested // by the current process. +// +// Deprecated: Use GetROLockFile func GetROLockfile(path string) (Locker, error) { - return getLockfile(path, true) + return GetROLockFile(path) } -// getLockfile returns a Locker object, possibly (depending on the platform) +// getLockFile returns a *LockFile object, possibly (depending on the platform) // working inter-process, and associated with the specified path. // -// If ro, the lock is a read-write lock and the returned Locker should correspond to the +// If ro, the lock is a read-write lock and the returned *LockFile should correspond to the // “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock, -// or a read-write lock and Locker should correspond to the “lock for writing” (exclusive) operation. +// or a read-write lock and *LockFile should correspond to the “lock for writing” (exclusive) operation. // // WARNING: // - The lock may or MAY NOT be inter-process. // - There may or MAY NOT be an actual object on the filesystem created for the specified path. // - Even if ro, the lock MAY be exclusive. -func getLockfile(path string, ro bool) (Locker, error) { - lockfilesLock.Lock() - defer lockfilesLock.Unlock() - if lockfiles == nil { - lockfiles = make(map[string]Locker) +func getLockfile(path string, ro bool) (*LockFile, error) { + lockFilesLock.Lock() + defer lockFilesLock.Unlock() + if lockFiles == nil { + lockFiles = make(map[string]*LockFile) } cleanPath, err := filepath.Abs(path) if err != nil { - return nil, errors.Wrapf(err, "error ensuring that path %q is an absolute path", path) + return nil, fmt.Errorf("ensuring that path %q is an absolute path: %w", path, err) } - if locker, ok := lockfiles[cleanPath]; ok { - if ro && locker.IsReadWrite() { - return nil, errors.Errorf("lock %q is not a read-only lock", cleanPath) + if lockFile, ok := lockFiles[cleanPath]; ok { + if ro && lockFile.IsReadWrite() { + return nil, fmt.Errorf("lock %q is not a read-only lock", cleanPath) } - if !ro && !locker.IsReadWrite() { - return nil, errors.Errorf("lock %q is not a read-write lock", cleanPath) + if !ro && !lockFile.IsReadWrite() { + return nil, fmt.Errorf("lock %q is not a read-write lock", cleanPath) } - return locker, nil + return lockFile, nil } - locker, err := createLockerForPath(cleanPath, ro) // platform-dependent locker + lockFile, err := createLockFileForPath(cleanPath, ro) // platform-dependent LockFile if err != nil { return nil, err } - lockfiles[cleanPath] = locker - return locker, nil + lockFiles[cleanPath] = lockFile + return lockFile, nil } diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go index 0a92da2c0..335980914 100644 --- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go +++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go @@ -1,63 +1,168 @@ +//go:build linux || solaris || darwin || freebsd // +build linux solaris darwin freebsd package lockfile import ( + "bytes" + cryptorand "crypto/rand" + "encoding/binary" "fmt" "os" + "path/filepath" "sync" + "sync/atomic" "time" - "github.com/containers/storage/pkg/stringid" "github.com/containers/storage/pkg/system" - "github.com/pkg/errors" "golang.org/x/sys/unix" ) -type lockfile 
struct { +// *LockFile represents a file lock where the file is used to cache an +// identifier of the last party that made changes to whatever's being protected +// by the lock. +// +// It MUST NOT be created manually. Use GetLockFile or GetROLockFile instead. +type LockFile struct { + // The following fields are only set when constructing *LockFile, and must never be modified afterwards. + // They are safe to access without any other locking. + file string + ro bool + // rwMutex serializes concurrent reader-writer acquisitions in the same process space rwMutex *sync.RWMutex // stateMutex is used to synchronize concurrent accesses to the state below stateMutex *sync.Mutex counter int64 - file string - fd uintptr - lw string + lw LastWrite // A global value valid as of the last .Touch() or .Modified() locktype int16 locked bool - ro bool - recursive bool + // The following fields are only modified on transitions between counter == 0 / counter != 0. + // Thus, they can be safely accessed by users _that currently hold the LockFile_ without locking. + // In other cases, they need to be protected using stateMutex. + fd uintptr +} + +// LastWrite is an opaque identifier of the last write to some *LockFile. +// It can be used by users of a *LockFile to determine if the lock indicates changes +// since the last check. +// +// Never construct a LastWrite manually; only accept it from *LockFile methods, and pass it back. +type LastWrite struct { + // Never modify fields of a LastWrite object; it has value semantics. + state []byte // Contents of the lock file. +} + +const lastWriterIDSize = 64 // This must be the same as len(stringid.GenerateRandomID) +var lastWriterIDCounter uint64 // Private state for newLastWriterID + +// newLastWrite returns a new "last write" ID. +// The value must be different on every call, and also differ from values +// generated by other processes. +func newLastWrite() LastWrite { + // The ID is (PID, time, per-process counter, random) + // PID + time represents both a unique process across reboots, + // and a specific time within the process; the per-process counter + // is an extra safeguard for in-process concurrency. + // The random part disambiguates across process namespaces + // (where PID values might collide), serves as a general-purpose + // extra safety, _and_ is used to pad the output to lastWriterIDSize, + // because other versions of this code exist and they don't work + // efficiently if the size of the value changes. + pid := os.Getpid() + tm := time.Now().UnixNano() + counter := atomic.AddUint64(&lastWriterIDCounter, 1) + + res := make([]byte, lastWriterIDSize) + binary.LittleEndian.PutUint64(res[0:8], uint64(tm)) + binary.LittleEndian.PutUint64(res[8:16], counter) + binary.LittleEndian.PutUint32(res[16:20], uint32(pid)) + if n, err := cryptorand.Read(res[20:lastWriterIDSize]); err != nil || n != lastWriterIDSize-20 { + panic(err) // This shouldn't happen + } + + return LastWrite{ + state: res, + } +} + +// newLastWriteFromData returns a LastWrite corresponding to data that came from a previous LastWrite.serialize +func newLastWriteFromData(serialized []byte) LastWrite { + if serialized == nil { + panic("newLastWriteFromData with nil data") + } + return LastWrite{ + state: serialized, + } +} + +// serialize returns bytes to write to the lock file to represent the specified write. 
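Aside: the 64-byte last-writer ID built by newLastWrite above has a fixed layout. Purely as an illustration (this decode helper does not exist in the package), the fields can be read back like so:

package main

import (
	"encoding/binary"
	"fmt"
)

// decodeLastWrite unpacks the layout written by newLastWrite: 8 bytes of
// UnixNano time, 8 bytes of per-process counter, 4 bytes of PID, and 44
// random padding bytes that only guard against collisions.
func decodeLastWrite(state []byte) (tm int64, counter uint64, pid uint32) {
	tm = int64(binary.LittleEndian.Uint64(state[0:8]))
	counter = binary.LittleEndian.Uint64(state[8:16])
	pid = binary.LittleEndian.Uint32(state[16:20])
	return tm, counter, pid
}

func main() {
	state := make([]byte, 64)
	binary.LittleEndian.PutUint64(state[0:8], 1700000000000000000)
	binary.LittleEndian.PutUint64(state[8:16], 1)
	binary.LittleEndian.PutUint32(state[16:20], 4242)
	fmt.Println(decodeLastWrite(state)) // 1700000000000000000 1 4242
}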
+func (lw LastWrite) serialize() []byte { + if lw.state == nil { + panic("LastWrite.serialize on an uninitialized object") + } + return lw.state +} + +// Equals returns true if lw matches other +func (lw LastWrite) equals(other LastWrite) bool { + if lw.state == nil { + panic("LastWrite.equals on an uninitialized object") + } + if other.state == nil { + panic("LastWrite.equals with an uninitialized counterparty") + } + return bytes.Equal(lw.state, other.state) } // openLock opens the file at path and returns the corresponding file -// descriptor. Note that the path is opened read-only when ro is set. If ro -// is unset, openLock will open the path read-write and create the file if -// necessary. -func openLock(path string, ro bool) (int, error) { +// descriptor. The path is opened either read-only or read-write, +// depending on the value of ro argument. +// +// openLock will create the file and its parent directories, +// if necessary. +func openLock(path string, ro bool) (fd int, err error) { + flags := unix.O_CLOEXEC | os.O_CREATE if ro { - return unix.Open(path, os.O_RDONLY|unix.O_CLOEXEC, 0) + flags |= os.O_RDONLY + } else { + flags |= os.O_RDWR + } + fd, err = unix.Open(path, flags, 0o644) + if err == nil { + return fd, nil } - return unix.Open(path, os.O_RDWR|unix.O_CLOEXEC|os.O_CREATE, unix.S_IRUSR|unix.S_IWUSR|unix.S_IRGRP|unix.S_IROTH) + + // the directory of the lockfile seems to be removed, try to create it + if os.IsNotExist(err) { + if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil { + return fd, fmt.Errorf("creating lock file directory: %w", err) + } + + return openLock(path, ro) + } + + return fd, &os.PathError{Op: "open", Path: path, Err: err} } -// createLockerForPath returns a Locker object, possibly (depending on the platform) +// createLockFileForPath returns new *LockFile object, possibly (depending on the platform) // working inter-process and associated with the specified path. // // This function will be called at most once for each path value within a single process. // -// If ro, the lock is a read-write lock and the returned Locker should correspond to the +// If ro, the lock is a read-write lock and the returned *LockFile should correspond to the // “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock, -// or a read-write lock and Locker should correspond to the “lock for writing” (exclusive) operation. +// or a read-write lock and *LockFile should correspond to the “lock for writing” (exclusive) operation. // // WARNING: // - The lock may or MAY NOT be inter-process. // - There may or MAY NOT be an actual object on the filesystem created for the specified path. // - Even if ro, the lock MAY be exclusive. -func createLockerForPath(path string, ro bool) (Locker, error) { +func createLockFileForPath(path string, ro bool) (*LockFile, error) { // Check if we can open the lock. fd, err := openLock(path, ro) if err != nil { - return nil, errors.Wrapf(err, "error opening %q", path) + return nil, err } unix.Close(fd) @@ -65,22 +170,24 @@ func createLockerForPath(path string, ro bool) (Locker, error) { if ro { locktype = unix.F_RDLCK } - return &lockfile{ - stateMutex: &sync.Mutex{}, + return &LockFile{ + file: path, + ro: ro, + rwMutex: &sync.RWMutex{}, - file: path, - lw: stringid.GenerateRandomID(), + stateMutex: &sync.Mutex{}, + lw: newLastWrite(), // For compatibility, the first call of .Modified() will always report a change. 
locktype: int16(locktype), locked: false, - ro: ro}, nil + }, nil } // lock locks the lockfile via FCTNL(2) based on the specified type and // command. -func (l *lockfile) lock(lType int16, recursive bool) { +func (l *LockFile) lock(lType int16) { lk := unix.Flock_t{ Type: lType, - Whence: int16(os.SEEK_SET), + Whence: int16(unix.SEEK_SET), Start: 0, Len: 0, } @@ -88,13 +195,7 @@ func (l *lockfile) lock(lType int16, recursive bool) { case unix.F_RDLCK: l.rwMutex.RLock() case unix.F_WRLCK: - if recursive { - // NOTE: that's okay as recursive is only set in RecursiveLock(), so - // there's no need to protect against hypothetical RDLCK cases. - l.rwMutex.RLock() - } else { - l.rwMutex.Lock() - } + l.rwMutex.Lock() default: panic(fmt.Sprintf("attempted to acquire a file lock of unrecognized type %d", lType)) } @@ -104,7 +205,7 @@ func (l *lockfile) lock(lType int16, recursive bool) { // If we're the first reference on the lock, we need to open the file again. fd, err := openLock(l.file, l.ro) if err != nil { - panic(fmt.Sprintf("error opening %q: %v", l.file, err)) + panic(err) } l.fd = uintptr(fd) @@ -117,39 +218,27 @@ func (l *lockfile) lock(lType int16, recursive bool) { } l.locktype = lType l.locked = true - l.recursive = recursive l.counter++ } // Lock locks the lockfile as a writer. Panic if the lock is a read-only one. -func (l *lockfile) Lock() { +func (l *LockFile) Lock() { if l.ro { panic("can't take write lock on read-only lock file") } else { - l.lock(unix.F_WRLCK, false) - } -} - -// RecursiveLock locks the lockfile as a writer but allows for recursive -// acquisitions within the same process space. Note that RLock() will be called -// if it's a lockTypReader lock. -func (l *lockfile) RecursiveLock() { - if l.ro { - l.RLock() - } else { - l.lock(unix.F_WRLCK, true) + l.lock(unix.F_WRLCK) } } // LockRead locks the lockfile as a reader. -func (l *lockfile) RLock() { - l.lock(unix.F_RDLCK, false) +func (l *LockFile) RLock() { + l.lock(unix.F_RDLCK) } // Unlock unlocks the lockfile. -func (l *lockfile) Unlock() { +func (l *LockFile) Unlock() { l.stateMutex.Lock() - if l.locked == false { + if !l.locked { // Panic when unlocking an unlocked lock. That's a violation // of the lock semantics and will reveal such. panic("calling Unlock on unlocked lock") @@ -170,7 +259,7 @@ func (l *lockfile) Unlock() { // file lock. unix.Close(int(l.fd)) } - if l.locktype == unix.F_RDLCK || l.recursive { + if l.locktype == unix.F_RDLCK { l.rwMutex.RUnlock() } else { l.rwMutex.Unlock() @@ -178,68 +267,157 @@ func (l *lockfile) Unlock() { l.stateMutex.Unlock() } -// Locked checks if lockfile is locked for writing by a thread in this process. -func (l *lockfile) Locked() bool { - l.stateMutex.Lock() - defer l.stateMutex.Unlock() - return l.locked && (l.locktype == unix.F_WRLCK) +func (l *LockFile) AssertLocked() { + // DO NOT provide a variant that returns the value of l.locked. + // + // If the caller does not hold the lock, l.locked might nevertheless be true because another goroutine does hold it, and + // we can’t tell the difference. + // + // Hence, this “AssertLocked” method, which exists only for sanity checks. + + // Don’t even bother with l.stateMutex: The caller is expected to hold the lock, and in that case l.locked is constant true + // with no possible writers. 
+ // If the caller does not hold the lock, we are violating the locking/memory model anyway, and accessing the data + // without the lock is more efficient for callers, and potentially more visible to lock analysers for incorrect callers. + if !l.locked { + panic("internal error: lock is not held by the expected owner") + } } -// Touch updates the lock file with the UID of the user. -func (l *lockfile) Touch() error { - l.stateMutex.Lock() - if !l.locked || (l.locktype != unix.F_WRLCK) { - panic("attempted to update last-writer in lockfile without the write lock") +func (l *LockFile) AssertLockedForWriting() { + // DO NOT provide a variant that returns the current lock state. + // + // The same caveats as for AssertLocked apply equally. + + l.AssertLocked() + // Like AssertLocked, don’t even bother with l.stateMutex. + if l.locktype != unix.F_WRLCK { + panic("internal error: lock is not held for writing") } - defer l.stateMutex.Unlock() - l.lw = stringid.GenerateRandomID() - id := []byte(l.lw) - _, err := unix.Seek(int(l.fd), 0, os.SEEK_SET) +} + +// GetLastWrite returns a LastWrite value corresponding to current state of the lock. +// This is typically called before (_not after_) loading the state when initializing a consumer +// of the data protected by the lock. +// During the lifetime of the consumer, the consumer should usually call ModifiedSince instead. +// +// The caller must hold the lock (for reading or writing). +func (l *LockFile) GetLastWrite() (LastWrite, error) { + l.AssertLocked() + contents := make([]byte, lastWriterIDSize) + n, err := unix.Pread(int(l.fd), contents, 0) if err != nil { - return err + return LastWrite{}, err } - n, err := unix.Write(int(l.fd), id) + // It is important to handle the partial read case, because + // the initial size of the lock file is zero, which is a valid + // state (no writes yet) + contents = contents[:n] + return newLastWriteFromData(contents), nil +} + +// RecordWrite updates the lock with a new LastWrite value, and returns the new value. +// +// If this function fails, the LastWriter value of the lock is indeterminate; +// the caller should keep using the previously-recorded LastWrite value, +// and possibly detecting its own modification as an external one: +// +// lw, err := state.lock.RecordWrite() +// if err != nil { /* fail */ } +// state.lastWrite = lw +// +// The caller must hold the lock for writing. +func (l *LockFile) RecordWrite() (LastWrite, error) { + l.AssertLockedForWriting() + lw := newLastWrite() + lockContents := lw.serialize() + n, err := unix.Pwrite(int(l.fd), lockContents, 0) + if err != nil { + return LastWrite{}, err + } + if n != len(lockContents) { + return LastWrite{}, unix.ENOSPC + } + return lw, nil +} + +// ModifiedSince checks if the lock has been changed since a provided LastWrite value, +// and returns the one to record instead. +// +// If ModifiedSince reports no modification, the previous LastWrite value +// is still valid and can continue to be used. +// +// If this function fails, the LastWriter value of the lock is indeterminate; +// the caller should fail and keep using the previously-recorded LastWrite value, +// so that it continues failing until the situation is resolved. 
Similarly, +// it should only update the recorded LastWrite value after processing the update: +// +// lw2, modified, err := state.lock.ModifiedSince(state.lastWrite) +// if err != nil { /* fail */ } +// state.lastWrite = lw2 +// if modified { +// if err := reload(); err != nil { /* fail */ } +// state.lastWrite = lw2 +// } +// +// The caller must hold the lock (for reading or writing). +func (l *LockFile) ModifiedSince(previous LastWrite) (LastWrite, bool, error) { + l.AssertLocked() + currentLW, err := l.GetLastWrite() + if err != nil { + return LastWrite{}, false, err + } + modified := !previous.equals(currentLW) + return currentLW, modified, nil +} + +// Touch updates the lock file with to record that the current lock holder has modified the lock-protected data. +// +// Deprecated: Use *LockFile.RecordWrite. +func (l *LockFile) Touch() error { + lw, err := l.RecordWrite() if err != nil { return err } - if n != len(id) { - return unix.ENOSPC + l.stateMutex.Lock() + if !l.locked || (l.locktype != unix.F_WRLCK) { + panic("attempted to update last-writer in lockfile without the write lock") } + defer l.stateMutex.Unlock() + l.lw = lw return nil } // Modified indicates if the lockfile has been updated since the last time it // was loaded. -func (l *lockfile) Modified() (bool, error) { +// NOTE: Unlike ModifiedSince, this returns true the first time it is called on a *LockFile. +// Callers cannot, in general, rely on this, because that might have happened for some other +// owner of the same *LockFile who created it previously. +// +// Deprecated: Use *LockFile.ModifiedSince. +func (l *LockFile) Modified() (bool, error) { l.stateMutex.Lock() - id := []byte(l.lw) if !l.locked { panic("attempted to check last-writer in lockfile without locking it first") } defer l.stateMutex.Unlock() - _, err := unix.Seek(int(l.fd), 0, os.SEEK_SET) + oldLW := l.lw + // Note that this is called with stateMutex held; that’s fine because ModifiedSince doesn’t need to lock it. + currentLW, modified, err := l.ModifiedSince(oldLW) if err != nil { return true, err } - n, err := unix.Read(int(l.fd), id) - if err != nil { - return true, err - } - if n != len(id) { - return true, nil - } - lw := l.lw - l.lw = string(id) - return l.lw != lw, nil + l.lw = currentLW + return modified, nil } // IsReadWriteLock indicates if the lock file is a read-write lock. -func (l *lockfile) IsReadWrite() bool { +func (l *LockFile) IsReadWrite() bool { return !l.ro } // TouchedSince indicates if the lock file has been touched since the specified time -func (l *lockfile) TouchedSince(when time.Time) bool { +func (l *LockFile) TouchedSince(when time.Time) bool { st, err := system.Fstat(int(l.fd)) if err != nil { return true diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go index 82bd91db9..09f2aca5c 100644 --- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go +++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package lockfile @@ -8,65 +9,140 @@ import ( "time" ) -// createLockerForPath returns a Locker object, possibly (depending on the platform) +// createLockFileForPath returns a *LockFile object, possibly (depending on the platform) // working inter-process and associated with the specified path. // // This function will be called at most once for each path value within a single process. 
// -// If ro, the lock is a read-write lock and the returned Locker should correspond to the +// If ro, the lock is a read-write lock and the returned *LockFile should correspond to the // “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock, -// or a read-write lock and Locker should correspond to the “lock for writing” (exclusive) operation. +// or a read-write lock and *LockFile should correspond to the “lock for writing” (exclusive) operation. // // WARNING: // - The lock may or MAY NOT be inter-process. // - There may or MAY NOT be an actual object on the filesystem created for the specified path. // - Even if ro, the lock MAY be exclusive. -func createLockerForPath(path string, ro bool) (Locker, error) { - return &lockfile{locked: false}, nil +func createLockFileForPath(path string, ro bool) (*LockFile, error) { + return &LockFile{locked: false}, nil } -type lockfile struct { +// *LockFile represents a file lock where the file is used to cache an +// identifier of the last party that made changes to whatever's being protected +// by the lock. +// +// It MUST NOT be created manually. Use GetLockFile or GetROLockFile instead. +type LockFile struct { mu sync.Mutex file string locked bool } -func (l *lockfile) Lock() { - l.mu.Lock() - l.locked = true +// LastWrite is an opaque identifier of the last write to some *LockFile. +// It can be used by users of a *LockFile to determine if the lock indicates changes +// since the last check. +// A default-initialized LastWrite never matches any last write, i.e. it always indicates changes. +type LastWrite struct { + // Nothing: The Windows “implementation” does not actually track writes. } -func (l *lockfile) RecursiveLock() { - // We don't support Windows but a recursive writer-lock in one process-space - // is really a writer lock, so just panic. - panic("not supported") +func (l *LockFile) Lock() { + l.mu.Lock() + l.locked = true } -func (l *lockfile) RLock() { +func (l *LockFile) RLock() { l.mu.Lock() l.locked = true } -func (l *lockfile) Unlock() { +func (l *LockFile) Unlock() { l.locked = false l.mu.Unlock() } -func (l *lockfile) Locked() bool { - return l.locked +func (l *LockFile) AssertLocked() { + // DO NOT provide a variant that returns the value of l.locked. + // + // If the caller does not hold the lock, l.locked might nevertheless be true because another goroutine does hold it, and + // we can’t tell the difference. + // + // Hence, this “AssertLocked” method, which exists only for sanity checks. + if !l.locked { + panic("internal error: lock is not held by the expected owner") + } +} + +func (l *LockFile) AssertLockedForWriting() { + // DO NOT provide a variant that returns the current lock state. + // + // The same caveats as for AssertLocked apply equally. + l.AssertLocked() // The current implementation does not distinguish between read and write locks. +} + +// GetLastWrite() returns a LastWrite value corresponding to current state of the lock. +// This is typically called before (_not after_) loading the state when initializing a consumer +// of the data protected by the lock. +// During the lifetime of the consumer, the consumer should usually call ModifiedSince instead. +// +// The caller must hold the lock (for reading or writing) before this function is called. 
+func (l *LockFile) GetLastWrite() (LastWrite, error) { + l.AssertLocked() + return LastWrite{}, nil } -func (l *lockfile) Modified() (bool, error) { +// RecordWrite updates the lock with a new LastWrite value, and returns the new value. +// +// If this function fails, the LastWriter value of the lock is indeterminate; +// the caller should keep using the previously-recorded LastWrite value, +// and possibly detecting its own modification as an external one: +// +// lw, err := state.lock.RecordWrite() +// if err != nil { /* fail */ } +// state.lastWrite = lw +// +// The caller must hold the lock for writing. +func (l *LockFile) RecordWrite() (LastWrite, error) { + return LastWrite{}, nil +} + +// ModifiedSince checks if the lock has been changed since a provided LastWrite value, +// and returns the one to record instead. +// +// If ModifiedSince reports no modification, the previous LastWrite value +// is still valid and can continue to be used. +// +// If this function fails, the LastWriter value of the lock is indeterminate; +// the caller should fail and keep using the previously-recorded LastWrite value, +// so that it continues failing until the situation is resolved. Similarly, +// it should only update the recorded LastWrite value after processing the update: +// +// lw2, modified, err := state.lock.ModifiedSince(state.lastWrite) +// if err != nil { /* fail */ } +// state.lastWrite = lw2 +// if modified { +// if err := reload(); err != nil { /* fail */ } +// state.lastWrite = lw2 +// } +// +// The caller must hold the lock (for reading or writing). +func (l *LockFile) ModifiedSince(previous LastWrite) (LastWrite, bool, error) { + return LastWrite{}, false, nil +} + +// Deprecated: Use *LockFile.ModifiedSince. +func (l *LockFile) Modified() (bool, error) { return false, nil } -func (l *lockfile) Touch() error { + +// Deprecated: Use *LockFile.RecordWrite. +func (l *LockFile) Touch() error { return nil } -func (l *lockfile) IsReadWrite() bool { +func (l *LockFile) IsReadWrite() bool { return false } -func (l *lockfile) TouchedSince(when time.Time) bool { +func (l *LockFile) TouchedSince(when time.Time) bool { stat, err := os.Stat(l.file) if err != nil { return true diff --git a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go index e2cf30b41..de10e3324 100644 --- a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go +++ b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go @@ -1,3 +1,4 @@ +//go:build linux && cgo // +build linux,cgo package loopback @@ -43,7 +44,7 @@ func openNextAvailableLoopback(index int, sparseName string, sparseFile *os.File var st syscall.Stat_t err = syscall.Fstat(int(sparseFile.Fd()), &st) if err != nil { - logrus.Errorf("Error reading information about loopback file %s: %v", sparseName, err) + logrus.Errorf("Reading information about loopback file %s: %v", sparseName, err) return nil, ErrAttachLoopbackDevice } @@ -68,7 +69,7 @@ func openNextAvailableLoopback(index int, sparseName string, sparseFile *os.File // OpenFile adds O_CLOEXEC loopFile, err = os.OpenFile(target, os.O_RDWR, 0644) if err != nil { - logrus.Errorf("Error opening loopback device: %s", err) + logrus.Errorf("Opening loopback device: %s", err) return nil, ErrAttachLoopbackDevice } @@ -90,7 +91,7 @@ func openNextAvailableLoopback(index int, sparseName string, sparseFile *os.File // device and inode numbers. 
dev, ino, err := getLoopbackBackingFile(loopFile) if err != nil { - logrus.Errorf("Error getting loopback backing file: %s", err) + logrus.Errorf("Getting loopback backing file: %s", err) return nil, ErrGetLoopbackBackingFile } if dev != uint64(st.Dev) || ino != st.Ino { @@ -125,7 +126,7 @@ func AttachLoopDevice(sparseName string) (loop *os.File, err error) { // OpenFile adds O_CLOEXEC sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644) if err != nil { - logrus.Errorf("Error opening sparse file %s: %s", sparseName, err) + logrus.Errorf("Opening sparse file: %v", err) return nil, ErrAttachLoopbackDevice } defer sparseFile.Close() @@ -147,7 +148,7 @@ func AttachLoopDevice(sparseName string) (loop *os.File, err error) { // If the call failed, then free the loopback device if err := ioctlLoopClrFd(loopFile.Fd()); err != nil { - logrus.Error("Error while cleaning up the loopback device") + logrus.Error("While cleaning up the loopback device") } loopFile.Close() return nil, ErrAttachLoopbackDevice diff --git a/vendor/github.com/containers/storage/pkg/loopback/ioctl.go b/vendor/github.com/containers/storage/pkg/loopback/ioctl.go index ea6841958..da2ba46fe 100644 --- a/vendor/github.com/containers/storage/pkg/loopback/ioctl.go +++ b/vendor/github.com/containers/storage/pkg/loopback/ioctl.go @@ -1,3 +1,4 @@ +//go:build linux && cgo // +build linux,cgo package loopback diff --git a/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go b/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go index a50de7f07..21a981007 100644 --- a/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go +++ b/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go @@ -1,3 +1,4 @@ +//go:build linux && cgo // +build linux,cgo package loopback diff --git a/vendor/github.com/containers/storage/pkg/loopback/loopback.go b/vendor/github.com/containers/storage/pkg/loopback/loopback.go index f4cf2826e..ec4287247 100644 --- a/vendor/github.com/containers/storage/pkg/loopback/loopback.go +++ b/vendor/github.com/containers/storage/pkg/loopback/loopback.go @@ -1,3 +1,4 @@ +//go:build linux && cgo // +build linux,cgo package loopback @@ -13,7 +14,7 @@ import ( func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) { loopInfo, err := ioctlLoopGetStatus64(file.Fd()) if err != nil { - logrus.Errorf("Error get loopback backing file: %s", err) + logrus.Errorf("Get loopback backing file: %v", err) return 0, 0, ErrGetLoopbackBackingFile } return loopInfo.loDevice, loopInfo.loInode, nil @@ -22,7 +23,7 @@ func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) { // SetCapacity reloads the size for the loopback device. 
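Aside: an illustrative caller of the loopback attach path shown above, not part of the patch; it assumes Linux with cgo and an existing sparse file at the given (made-up) path:

//go:build linux && cgo

package main

import (
	"log"

	"github.com/containers/storage/pkg/loopback"
)

func main() {
	// Attaches the next free /dev/loopN device to the sparse file and
	// returns the opened loop device.
	loop, err := loopback.AttachLoopDevice("/var/tmp/disk.img")
	if err != nil {
		log.Fatal(err)
	}
	defer loop.Close()
	log.Println("attached loop device:", loop.Name())
}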
func SetCapacity(file *os.File) error { if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil { - logrus.Errorf("Error loopbackSetCapacity: %s", err) + logrus.Errorf("loopbackSetCapacity: %s", err) return ErrSetCapacity } return nil diff --git a/vendor/github.com/containers/storage/pkg/mount/flags.go b/vendor/github.com/containers/storage/pkg/mount/flags.go index 07a0f4847..5de3a671d 100644 --- a/vendor/github.com/containers/storage/pkg/mount/flags.go +++ b/vendor/github.com/containers/storage/pkg/mount/flags.go @@ -99,7 +99,7 @@ func MergeTmpfsOptions(options []string) ([]string, error) { } opt := strings.SplitN(option, "=", 2) if len(opt) != 2 || !validFlags[opt[0]] { - return nil, fmt.Errorf("Invalid tmpfs option %q", opt) + return nil, fmt.Errorf("invalid tmpfs option %q", opt) } if !dataCollisions[opt[0]] { // We prepend the option and add to collision map @@ -142,7 +142,7 @@ func ParseTmpfsOptions(options string) (int, string, error) { for _, o := range strings.Split(data, ",") { opt := strings.SplitN(o, "=", 2) if !validFlags[opt[0]] { - return 0, "", fmt.Errorf("Invalid tmpfs option %q", opt) + return 0, "", fmt.Errorf("invalid tmpfs option %q", opt) } } return flags, data, nil diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go new file mode 100644 index 000000000..3ba99cf93 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go @@ -0,0 +1,48 @@ +package mount + +import ( + "golang.org/x/sys/unix" +) + +const ( + // RDONLY will mount the file system read-only. + RDONLY = unix.MNT_RDONLY + + // NOSUID will not allow set-user-identifier or set-group-identifier bits to + // take effect. + NOSUID = unix.MNT_NOSUID + + // NOEXEC will not allow execution of any binaries on the mounted file system. + NOEXEC = unix.MNT_NOEXEC + + // SYNCHRONOUS will allow I/O to the file system to be done synchronously. + SYNCHRONOUS = unix.MNT_SYNCHRONOUS + + // REMOUNT will attempt to remount an already-mounted file system. This is + // commonly used to change the mount flags for a file system, especially to + // make a readonly file system writeable. It does not change device or mount + // point. + REMOUNT = unix.MNT_UPDATE + + // NOATIME will not update the file access time when reading from a file. 
+ NOATIME = unix.MNT_NOATIME + + mntDetach = unix.MNT_FORCE + + NODIRATIME = 0 + NODEV = 0 + DIRSYNC = 0 + MANDLOCK = 0 + BIND = 0 + RBIND = 0 + UNBINDABLE = 0 + RUNBINDABLE = 0 + PRIVATE = 0 + RPRIVATE = 0 + SLAVE = 0 + RSLAVE = 0 + SHARED = 0 + RSHARED = 0 + RELATIME = 0 + STRICTATIME = 0 +) diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go index 9afd26d4c..ee0f593a5 100644 --- a/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go @@ -1,4 +1,5 @@ -// +build !linux +//go:build !linux && !freebsd +// +build !linux,!freebsd package mount diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go index b31cf99d0..c70b0bf99 100644 --- a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go +++ b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go @@ -1,3 +1,6 @@ +//go:build freebsd && cgo +// +build freebsd,cgo + package mount /* @@ -28,14 +31,25 @@ func allocateIOVecs(options []string) []C.struct_iovec { func mount(device, target, mType string, flag uintptr, data string) error { isNullFS := false - xs := strings.Split(data, ",") - for _, x := range xs { - if x == "bind" { - isNullFS = true + options := []string{"fspath", target} + + if data != "" { + xs := strings.Split(data, ",") + for _, x := range xs { + if x == "bind" { + isNullFS = true + continue + } + opt := strings.SplitN(x, "=", 2) + options = append(options, opt[0]) + if len(opt) == 2 { + options = append(options, opt[1]) + } else { + options = append(options, "") + } } } - options := []string{"fspath", target} if isNullFS { options = append(options, "fstype", "nullfs", "target", device) } else { @@ -48,7 +62,7 @@ func mount(device, target, mType string, flag uintptr, data string) error { if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 { reason := C.GoString(C.strerror(*C.__error())) - return fmt.Errorf("Failed to call nmount: %s", reason) + return fmt.Errorf("failed to call nmount: %s", reason) } return nil } diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go index 9d20cfbf8..74fe66609 100644 --- a/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go @@ -1,4 +1,6 @@ -// +build !linux,!freebsd +//go:build !linux && !(freebsd && cgo) +// +build !linux +// +build !freebsd !cgo package mount diff --git a/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go b/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go index 1d1afeee2..a2a1d4072 100644 --- a/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go +++ b/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go @@ -1,16 +1,29 @@ +//go:build !windows // +build !windows package mount -import "golang.org/x/sys/unix" +import ( + "time" + + "golang.org/x/sys/unix" +) func unmount(target string, flags int) error { - err := unix.Unmount(target, flags) - if err == nil || err == unix.EINVAL { - // Ignore "not mounted" error here. Note the same error - // can be returned if flags are invalid, so this code - // assumes that the flags value is always correct. 
- return nil + var err error + for i := 0; i < 50; i++ { + err = unix.Unmount(target, flags) + switch err { + case unix.EBUSY: + time.Sleep(50 * time.Millisecond) + continue + case unix.EINVAL, nil: + // Ignore "not mounted" error here. Note the same error + // can be returned if flags are invalid, so this code + // assumes that the flags value is always correct. + return nil + } + break } return &mountError{ diff --git a/vendor/github.com/containers/storage/pkg/mount/unmount_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/unmount_unsupported.go index eebc4ab84..d3a0cf51c 100644 --- a/vendor/github.com/containers/storage/pkg/mount/unmount_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/mount/unmount_unsupported.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package mount diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel.go index 7738fc741..6406cb14f 100644 --- a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel.go +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows // Package kernel provides helper function to get, parse and compare kernel diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go index 71f205b28..645790da6 100644 --- a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go @@ -1,3 +1,4 @@ +//go:build darwin // +build darwin // Package kernel provides helper function to get, parse and compare kernel @@ -37,16 +38,16 @@ func getRelease() (string, error) { // It has the format like ' Kernel Version: Darwin 14.5.0' content := strings.SplitN(line, ":", 2) if len(content) != 2 { - return "", fmt.Errorf("Kernel Version is invalid") + return "", fmt.Errorf("kernel version is invalid") } prettyNames, err := shellwords.Parse(content[1]) if err != nil { - return "", fmt.Errorf("Kernel Version is invalid: %s", err.Error()) + return "", fmt.Errorf("kernel version is invalid: %w", err) } if len(prettyNames) != 2 { - return "", fmt.Errorf("Kernel Version needs to be 'Darwin x.x.x' ") + return "", fmt.Errorf("kernel version needs to be 'Darwin x.x.x' ") } release = prettyNames[1] } diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go index 76e1e499f..ed8cca2c6 100644 --- a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go @@ -1,3 +1,4 @@ +//go:build linux || freebsd || solaris || openbsd // +build linux freebsd solaris openbsd // Package kernel provides helper function to get, parse and compare kernel @@ -35,7 +36,7 @@ func GetKernelVersion() (*VersionInfo, error) { // the given version. 
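Aside: an illustrative use of the kernel-version helper whose doc comment ends above, not part of the patch; the required version is an assumption:

//go:build !windows

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/parsers/kernel"
)

func main() {
	// Returns false only when the running kernel is known to be older than
	// the given version; if detection fails it logs a warning and
	// permissively returns true.
	if kernel.CheckKernelVersion(5, 10, 0) {
		fmt.Println("kernel is at least 5.10.0 (or detection failed)")
	}
}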
func CheckKernelVersion(k, major, minor int) bool { if v, err := GetKernelVersion(); err != nil { - logrus.Warnf("error getting kernel version: %s", err) + logrus.Warnf("Error getting kernel version: %s", err) } else { if CompareKernelVersion(*v, VersionInfo{Kernel: k, Major: major, Minor: minor}) < 0 { return false diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go index 3d3829236..b30da9fad 100644 --- a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package kernel diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_freebsd.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_freebsd.go new file mode 100644 index 000000000..e913fad00 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_freebsd.go @@ -0,0 +1,17 @@ +package kernel + +import "golang.org/x/sys/unix" + +// Utsname represents the system name structure. +// It is passthrough for unix.Utsname in order to make it portable with +// other platforms where it is not available. +type Utsname unix.Utsname + +func uname() (*unix.Utsname, error) { + uts := &unix.Utsname{} + + if err := unix.Uname(uts); err != nil { + return nil, err + } + return uts, nil +} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go index 1da3f239f..12671db51 100644 --- a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go @@ -1,18 +1,14 @@ -// +build !linux,!solaris +//go:build openbsd +// +build openbsd package kernel import ( - "errors" + "fmt" + "runtime" ) -// Utsname represents the system name structure. -// It is defined here to make it portable as it is available on linux but not -// on windows. -type Utsname struct { - Release [65]byte -} - +// A stub called by kernel_unix.go . func uname() (*Utsname, error) { - return nil, errors.New("Kernel version detection is available only on linux") + return nil, fmt.Errorf("Kernel version detection is not available on %s", runtime.GOOS) } diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported_type.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported_type.go new file mode 100644 index 000000000..f515500c9 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported_type.go @@ -0,0 +1,11 @@ +//go:build !linux && !solaris && !freebsd +// +build !linux,!solaris,!freebsd + +package kernel + +// Utsname represents the system name structure. +// It is defined here to make it portable as it is available on linux but not +// on windows. 
+type Utsname struct { + Release [65]byte +} diff --git a/vendor/github.com/containers/storage/pkg/parsers/parsers.go b/vendor/github.com/containers/storage/pkg/parsers/parsers.go index acc897168..85c23381d 100644 --- a/vendor/github.com/containers/storage/pkg/parsers/parsers.go +++ b/vendor/github.com/containers/storage/pkg/parsers/parsers.go @@ -13,7 +13,7 @@ import ( func ParseKeyValueOpt(opt string) (string, string, error) { parts := strings.SplitN(opt, "=", 2) if len(parts) != 2 { - return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt) + return "", "", fmt.Errorf("unable to parse key/value option: %s", opt) } return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil } diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go b/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go new file mode 100644 index 000000000..32d6a9f49 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go @@ -0,0 +1,38 @@ +//go:build freebsd +// +build freebsd + +package reexec + +import ( + "context" + "os" + "os/exec" + + "golang.org/x/sys/unix" +) + +// Self returns the path to the current process's binary. +// Uses sysctl. +func Self() string { + path, err := unix.SysctlArgs("kern.proc.pathname", -1) + if err == nil { + return path + } + return os.Args[0] +} + +// Command returns *exec.Cmd which has Path as current binary. +// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will +// be set to "/usr/bin/docker". +func Command(args ...string) *exec.Cmd { + cmd := exec.Command(Self()) + cmd.Args = args + return cmd +} + +// CommandContext returns *exec.Cmd which has Path as current binary. +func CommandContext(ctx context.Context, args ...string) *exec.Cmd { + cmd := exec.CommandContext(ctx, Self()) + cmd.Args = args + return cmd +} diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go index 372bee732..87b43ed95 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go +++ b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package reexec @@ -17,6 +18,7 @@ func Self() string { // This will use the in-memory version (/proc/self/exe) of the current binary, // it is thus safe to delete or replace the on-disk binary (os.Args[0]). func Command(args ...string) *exec.Cmd { + panicIfNotInitialized() cmd := exec.Command(Self()) cmd.Args = args return cmd @@ -26,6 +28,7 @@ func Command(args ...string) *exec.Cmd { // This will use the in-memory version (/proc/self/exe) of the current binary, // it is thus safe to delete or replace the on-disk binary (os.Args[0]). func CommandContext(ctx context.Context, args ...string) *exec.Cmd { + panicIfNotInitialized() cmd := exec.CommandContext(ctx, Self()) cmd.Args = args return cmd diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go index 1ecaa906f..a56ada216 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go +++ b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go @@ -1,4 +1,5 @@ -// +build freebsd solaris darwin +//go:build solaris || darwin +// +build solaris darwin package reexec @@ -17,6 +18,7 @@ func Self() string { // For example if current binary is "docker" at "/usr/bin/", then cmd.Path will // be set to "/usr/bin/docker". 
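Aside: the panicIfNotInitialized guard added throughout the reexec package means main() must call reexec.Init() before any Command use. A minimal end-to-end sketch, not part of the patch; the "my-helper" name is an assumption:

package main

import (
	"fmt"
	"log"

	"github.com/containers/storage/pkg/reexec"
)

func init() {
	// Register a subroutine to run when this binary is re-executed with
	// os.Args[0] == "my-helper".
	reexec.Register("my-helper", func() {
		fmt.Println("running in the child process")
	})
}

func main() {
	// Mandatory boilerplate: after this patch, calling reexec.Command
	// without having called reexec.Init first panics.
	if reexec.Init() {
		return // we were the re-executed child; its work is done
	}
	cmd := reexec.Command("my-helper") // args[0] selects the initializer
	out, err := cmd.CombinedOutput()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(string(out)) // "running in the child process"
}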
func Command(args ...string) *exec.Cmd { + panicIfNotInitialized() cmd := exec.Command(Self()) cmd.Args = args return cmd @@ -24,6 +26,7 @@ func Command(args ...string) *exec.Cmd { // CommandContext returns *exec.Cmd which has Path as current binary. func CommandContext(ctx context.Context, args ...string) *exec.Cmd { + panicIfNotInitialized() cmd := exec.CommandContext(ctx, Self()) cmd.Args = args return cmd diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go index 9d9374268..77c93b4ab 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux && !windows && !freebsd && !solaris && !darwin // +build !linux,!windows,!freebsd,!solaris,!darwin package reexec @@ -9,10 +10,12 @@ import ( // Command is unsupported on operating systems apart from Linux, Windows, FreeBSD, Solaris and Darwin. func Command(args ...string) *exec.Cmd { + panicIfNotInitialized() return nil } // CommandContext is unsupported on operating systems apart from Linux, Windows, FreeBSD, Solaris and Darwin. func CommandContext(ctx context.Context, args ...string) *exec.Cmd { + panicIfNotInitialized() return nil } diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_windows.go b/vendor/github.com/containers/storage/pkg/reexec/command_windows.go index 673ab476a..c46125ebf 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/command_windows.go +++ b/vendor/github.com/containers/storage/pkg/reexec/command_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package reexec @@ -17,6 +18,7 @@ func Self() string { // For example if current binary is "docker.exe" at "C:\", then cmd.Path will // be set to "C:\docker.exe". func Command(args ...string) *exec.Cmd { + panicIfNotInitialized() cmd := exec.Command(Self()) cmd.Args = args return cmd @@ -26,6 +28,7 @@ func Command(args ...string) *exec.Cmd { // For example if current binary is "docker.exe" at "C:\", then cmd.Path will // be set to "C:\docker.exe". func CommandContext(ctx context.Context, args ...string) *exec.Cmd { + panicIfNotInitialized() cmd := exec.CommandContext(ctx, Self()) cmd.Args = args return cmd diff --git a/vendor/github.com/containers/storage/pkg/reexec/reexec.go b/vendor/github.com/containers/storage/pkg/reexec/reexec.go index c56671d91..a1938cd4f 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/reexec.go +++ b/vendor/github.com/containers/storage/pkg/reexec/reexec.go @@ -7,7 +7,10 @@ import ( "path/filepath" ) -var registeredInitializers = make(map[string]func()) +var ( + registeredInitializers = make(map[string]func()) + initWasCalled = false +) // Register adds an initialization func under the specified name func Register(name string, initializer func()) { @@ -22,6 +25,7 @@ func Register(name string, initializer func()) { // initialization function was called. func Init() bool { initializer, exists := registeredInitializers[os.Args[0]] + initWasCalled = true if exists { initializer() @@ -30,6 +34,21 @@ func Init() bool { return false } +func panicIfNotInitialized() { + if !initWasCalled { + // The reexec package is used to run subroutines in + // subprocesses which would otherwise have unacceptable side + // effects on the main thread. If you found this error, then + // your program uses a package which needs to do this.
In + // order for that to work, main() should start with this + // boilerplate, or an equivalent: + // if reexec.Init() { + // return + // } + panic("a library subroutine needed to run a subprocess, but reexec.Init() was not called in main()") + } +} + func naiveSelf() string { name := os.Args[0] if filepath.Base(name) == name { diff --git a/vendor/github.com/containers/storage/pkg/regexp/regexp.go b/vendor/github.com/containers/storage/pkg/regexp/regexp.go new file mode 100644 index 000000000..ec8797106 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/regexp/regexp.go @@ -0,0 +1,214 @@ +package regexp + +import ( + "io" + "regexp" + "sync" +) + +// Regexp is a wrapper struct used for wrapping MustCompile regex expressions +// used as global variables. Using this structure helps speed the startup time +// of apps that want to use global regex variables. This library initializes them on +// first use as opposed to the start of the executable. +type Regexp struct { + once sync.Once + regexp *regexp.Regexp + val string +} + +func Delayed(val string) Regexp { + re := Regexp{ + val: val, + } + if precompile { + re.regexp = regexp.MustCompile(re.val) + } + return re +} + +func (re *Regexp) compile() { + if precompile { + return + } + re.once.Do(func() { + re.regexp = regexp.MustCompile(re.val) + }) +} + +func (re *Regexp) Expand(dst []byte, template []byte, src []byte, match []int) []byte { + re.compile() + return re.regexp.Expand(dst, template, src, match) +} + +func (re *Regexp) ExpandString(dst []byte, template string, src string, match []int) []byte { + re.compile() + return re.regexp.ExpandString(dst, template, src, match) +} +func (re *Regexp) Find(b []byte) []byte { + re.compile() + return re.regexp.Find(b) +} + +func (re *Regexp) FindAll(b []byte, n int) [][]byte { + re.compile() + return re.regexp.FindAll(b, n) +} + +func (re *Regexp) FindAllIndex(b []byte, n int) [][]int { + re.compile() + return re.regexp.FindAllIndex(b, n) +} + +func (re *Regexp) FindAllString(s string, n int) []string { + re.compile() + return re.regexp.FindAllString(s, n) +} + +func (re *Regexp) FindAllStringIndex(s string, n int) [][]int { + re.compile() + return re.regexp.FindAllStringIndex(s, n) +} + +func (re *Regexp) FindAllStringSubmatch(s string, n int) [][]string { + re.compile() + return re.regexp.FindAllStringSubmatch(s, n) +} + +func (re *Regexp) FindAllStringSubmatchIndex(s string, n int) [][]int { + re.compile() + return re.regexp.FindAllStringSubmatchIndex(s, n) +} + +func (re *Regexp) FindAllSubmatch(b []byte, n int) [][][]byte { + re.compile() + return re.regexp.FindAllSubmatch(b, n) +} + +func (re *Regexp) FindAllSubmatchIndex(b []byte, n int) [][]int { + re.compile() + return re.regexp.FindAllSubmatchIndex(b, n) +} + +func (re *Regexp) FindIndex(b []byte) (loc []int) { + re.compile() + return re.regexp.FindIndex(b) +} + +func (re *Regexp) FindReaderIndex(r io.RuneReader) (loc []int) { + re.compile() + return re.regexp.FindReaderIndex(r) +} + +func (re *Regexp) FindReaderSubmatchIndex(r io.RuneReader) []int { + re.compile() + return re.regexp.FindReaderSubmatchIndex(r) +} + +func (re *Regexp) FindString(s string) string { + re.compile() + return re.regexp.FindString(s) +} + +func (re *Regexp) FindStringIndex(s string) (loc []int) { + re.compile() + return re.regexp.FindStringIndex(s) +} + +func (re *Regexp) FindStringSubmatch(s string) []string { + re.compile() + return re.regexp.FindStringSubmatch(s) +} + +func (re *Regexp) FindStringSubmatchIndex(s string) []int { + re.compile() +
return re.regexp.FindStringSubmatchIndex(s) +} + +func (re *Regexp) FindSubmatch(b []byte) [][]byte { + re.compile() + return re.regexp.FindSubmatch(b) +} + +func (re *Regexp) FindSubmatchIndex(b []byte) []int { + re.compile() + return re.regexp.FindSubmatchIndex(b) +} + +func (re *Regexp) LiteralPrefix() (prefix string, complete bool) { + re.compile() + return re.regexp.LiteralPrefix() +} + +func (re *Regexp) Longest() { + re.compile() + re.regexp.Longest() +} + +func (re *Regexp) Match(b []byte) bool { + re.compile() + return re.regexp.Match(b) +} + +func (re *Regexp) MatchReader(r io.RuneReader) bool { + re.compile() + return re.regexp.MatchReader(r) +} +func (re *Regexp) MatchString(s string) bool { + re.compile() + return re.regexp.MatchString(s) +} + +func (re *Regexp) NumSubexp() int { + re.compile() + return re.regexp.NumSubexp() +} + +func (re *Regexp) ReplaceAll(src, repl []byte) []byte { + re.compile() + return re.regexp.ReplaceAll(src, repl) +} + +func (re *Regexp) ReplaceAllFunc(src []byte, repl func([]byte) []byte) []byte { + re.compile() + return re.regexp.ReplaceAllFunc(src, repl) +} + +func (re *Regexp) ReplaceAllLiteral(src, repl []byte) []byte { + re.compile() + return re.regexp.ReplaceAllLiteral(src, repl) +} + +func (re *Regexp) ReplaceAllLiteralString(src, repl string) string { + re.compile() + return re.regexp.ReplaceAllLiteralString(src, repl) +} + +func (re *Regexp) ReplaceAllString(src, repl string) string { + re.compile() + return re.regexp.ReplaceAllString(src, repl) +} + +func (re *Regexp) ReplaceAllStringFunc(src string, repl func(string) string) string { + re.compile() + return re.regexp.ReplaceAllStringFunc(src, repl) +} + +func (re *Regexp) Split(s string, n int) []string { + re.compile() + return re.regexp.Split(s, n) +} + +func (re *Regexp) String() string { + re.compile() + return re.regexp.String() +} + +func (re *Regexp) SubexpIndex(name string) int { + re.compile() + return re.regexp.SubexpIndex(name) +} + +func (re *Regexp) SubexpNames() []string { + re.compile() + return re.regexp.SubexpNames() +} diff --git a/vendor/github.com/containers/storage/pkg/regexp/regexp_dontprecompile.go b/vendor/github.com/containers/storage/pkg/regexp/regexp_dontprecompile.go new file mode 100644 index 000000000..834dd9433 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/regexp/regexp_dontprecompile.go @@ -0,0 +1,6 @@ +//go:build !regexp_precompile +// +build !regexp_precompile + +package regexp + +const precompile = false diff --git a/vendor/github.com/containers/storage/pkg/regexp/regexp_precompile.go b/vendor/github.com/containers/storage/pkg/regexp/regexp_precompile.go new file mode 100644 index 000000000..a5fe0dbc4 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/regexp/regexp_precompile.go @@ -0,0 +1,6 @@ +//go:build regexp_precompile +// +build regexp_precompile + +package regexp + +const precompile = true diff --git a/vendor/github.com/containers/storage/pkg/stringid/stringid.go b/vendor/github.com/containers/storage/pkg/stringid/stringid.go index a0c7c42a0..3ae44fd8a 100644 --- a/vendor/github.com/containers/storage/pkg/stringid/stringid.go +++ b/vendor/github.com/containers/storage/pkg/stringid/stringid.go @@ -9,17 +9,22 @@ import ( "math" "math/big" "math/rand" - "regexp" "strconv" "strings" + "sync" "time" + + "github.com/containers/storage/pkg/regexp" ) const shortLen = 12 var ( - validShortID = regexp.MustCompile("^[a-f0-9]{12}$") - validHex = regexp.MustCompile(`^[a-f0-9]{64}$`) + validShortID = regexp.Delayed("^[a-f0-9]{12}$") + 
validHex = regexp.Delayed(`^[a-f0-9]{64}$`) + + rngLock sync.Mutex + rng *rand.Rand // An RNG with seeding properties we control. It can only be accessed with rngLock held. ) // IsShortID determines if an arbitrary string *looks like* a short ID. @@ -67,7 +72,9 @@ func GenerateRandomID() string { // secure sources of random. // It helps you to save entropy. func GenerateNonCryptoID() string { - return generateID(readerFunc(rand.Read)) + rngLock.Lock() + defer rngLock.Unlock() + return generateID(readerFunc(rng.Read)) } // ValidateID checks whether an ID string is a valid image ID. @@ -79,7 +86,7 @@ func ValidateID(id string) error { } func init() { - // safely set the seed globally so we generate random ids. Tries to use a + // Initialize a private RNG so we generate random ids. Tries to use a // crypto seed before falling back to time. var seed int64 if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil { @@ -89,7 +96,7 @@ func init() { seed = cryptoseed.Int64() } - rand.Seed(seed) + rng = rand.New(rand.NewSource(seed)) } type readerFunc func(p []byte) (int, error) diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go b/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go index 09d58bcbf..a208a6b5b 100644 --- a/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go +++ b/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package system diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go b/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go index 45428c141..bfb621dcc 100644 --- a/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go +++ b/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package system diff --git a/vendor/github.com/containers/storage/pkg/system/init.go b/vendor/github.com/containers/storage/pkg/system/init.go index 17935088d..05642f603 100644 --- a/vendor/github.com/containers/storage/pkg/system/init.go +++ b/vendor/github.com/containers/storage/pkg/system/init.go @@ -6,7 +6,7 @@ import ( "unsafe" ) -// Used by chtimes +// maxTime is used by chtimes. var maxTime time.Time func init() { diff --git a/vendor/github.com/containers/storage/pkg/system/lchflags_bsd.go b/vendor/github.com/containers/storage/pkg/system/lchflags_bsd.go new file mode 100644 index 000000000..4eaeb5d69 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/lchflags_bsd.go @@ -0,0 +1,56 @@ +//go:build freebsd +// +build freebsd + +package system + +import ( + "unsafe" + + "golang.org/x/sys/unix" +) + +// Flag values from <sys/stat.h> +const ( + /* + * Definitions of flags stored in file flags word. + * + * Super-user and owner changeable flags. + */ + UF_SETTABLE uint32 = 0x0000ffff /* mask of owner changeable flags */ + UF_NODUMP uint32 = 0x00000001 /* do not dump file */ + UF_IMMUTABLE uint32 = 0x00000002 /* file may not be changed */ + UF_APPEND uint32 = 0x00000004 /* writes to file may only append */ + UF_OPAQUE uint32 = 0x00000008 /* directory is opaque wrt.
union */ + UF_NOUNLINK uint32 = 0x00000010 /* file may not be removed or renamed */ + + UF_SYSTEM uint32 = 0x00000080 /* Windows system file bit */ + UF_SPARSE uint32 = 0x00000100 /* sparse file */ + UF_OFFLINE uint32 = 0x00000200 /* file is offline */ + UF_REPARSE uint32 = 0x00000400 /* Windows reparse point file bit */ + UF_ARCHIVE uint32 = 0x00000800 /* file needs to be archived */ + UF_READONLY uint32 = 0x00001000 /* Windows readonly file bit */ + /* This is the same as the MacOS X definition of UF_HIDDEN. */ + UF_HIDDEN uint32 = 0x00008000 /* file is hidden */ + + /* + * Super-user changeable flags. + */ + SF_SETTABLE uint32 = 0xffff0000 /* mask of superuser changeable flags */ + SF_ARCHIVED uint32 = 0x00010000 /* file is archived */ + SF_IMMUTABLE uint32 = 0x00020000 /* file may not be changed */ + SF_APPEND uint32 = 0x00040000 /* writes to file may only append */ + SF_NOUNLINK uint32 = 0x00100000 /* file may not be removed or renamed */ + SF_SNAPSHOT uint32 = 0x00200000 /* snapshot inode */ +) + +func Lchflags(path string, flags uint32) error { + p, err := unix.BytePtrFromString(path) + if err != nil { + return err + } + _, _, e1 := unix.Syscall(unix.SYS_LCHFLAGS, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + return e1 + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/lcow_unix.go b/vendor/github.com/containers/storage/pkg/system/lcow_unix.go index cff33bb40..42658c8b9 100644 --- a/vendor/github.com/containers/storage/pkg/system/lcow_unix.go +++ b/vendor/github.com/containers/storage/pkg/system/lcow_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package system diff --git a/vendor/github.com/containers/storage/pkg/system/lstat_unix.go b/vendor/github.com/containers/storage/pkg/system/lstat_unix.go index e9d301f09..9b13e6146 100644 --- a/vendor/github.com/containers/storage/pkg/system/lstat_unix.go +++ b/vendor/github.com/containers/storage/pkg/system/lstat_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package system @@ -14,7 +15,7 @@ import ( func Lstat(path string) (*StatT, error) { s := &syscall.Stat_t{} if err := syscall.Lstat(path, s); err != nil { - return nil, &os.PathError{"Lstat", path, err} + return nil, &os.PathError{Op: "Lstat", Path: path, Err: err} } return fromStatT(s) } diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go b/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go new file mode 100644 index 000000000..46cb40291 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go @@ -0,0 +1,85 @@ +//go:build freebsd && cgo +// +build freebsd,cgo + +package system + +import ( + "errors" + "fmt" + "unsafe" + + "golang.org/x/sys/unix" +) + +// #include <unistd.h> +// #include <sys/vmmeter.h> +// #include <sys/sysctl.h> +// #include <vm/vm_param.h> +import "C" + +func getMemInfo() (int64, int64, error) { + data, err := unix.SysctlRaw("vm.vmtotal") + if err != nil { + return -1, -1, fmt.Errorf("can't get kernel info: %w", err) + } + if len(data) != C.sizeof_struct_vmtotal { + return -1, -1, fmt.Errorf("unexpected vmtotal size %d", len(data)) + } + + total := (*C.struct_vmtotal)(unsafe.Pointer(&data[0])) + + pagesize := int64(C.sysconf(C._SC_PAGESIZE)) + npages := int64(C.sysconf(C._SC_PHYS_PAGES)) + return pagesize * npages, pagesize * int64(total.t_free), nil +} + +func getSwapInfo() (int64, int64, error) { + var ( + total int64 = 0 + used int64 = 0 + ) + swapCount, err := unix.SysctlUint32("vm.nswapdev") + if err != nil { + return -1, -1, fmt.Errorf("reading vm.nswapdev: %w",
err) + } + for i := 0; i < int(swapCount); i++ { + data, err := unix.SysctlRaw("vm.swap_info", i) + if err != nil { + return -1, -1, fmt.Errorf("reading vm.swap_info.%d: %w", i, err) + } + if len(data) != C.sizeof_struct_xswdev { + return -1, -1, fmt.Errorf("unexpected swap_info size %d", len(data)) + } + xsw := (*C.struct_xswdev)(unsafe.Pointer(&data[0])) + total += int64(xsw.xsw_nblks) + used += int64(xsw.xsw_used) + } + pagesize := int64(C.sysconf(C._SC_PAGESIZE)) + return pagesize * total, pagesize * (total - used), nil +} + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. +func ReadMemInfo() (*MemInfo, error) { + MemTotal, MemFree, err := getMemInfo() + if err != nil { + return nil, fmt.Errorf("getting memory totals: %w", err) + } + SwapTotal, SwapFree, err := getSwapInfo() + if err != nil { + return nil, fmt.Errorf("getting swap totals: %w", err) + } + + if MemTotal < 0 || MemFree < 0 || SwapTotal < 0 || SwapFree < 0 { + return nil, errors.New("getting system memory info") + } + + meminfo := &MemInfo{} + // Total memory is total physical memory less memory locked by the kernel + meminfo.MemTotal = MemTotal + meminfo.MemFree = MemFree + meminfo.SwapTotal = SwapTotal + meminfo.SwapFree = SwapFree + + return meminfo, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go b/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go index 925776e78..d727b545c 100644 --- a/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go +++ b/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go @@ -1,3 +1,4 @@ +//go:build solaris && cgo // +build solaris,cgo package system @@ -90,7 +91,7 @@ func ReadMemInfo() (*MemInfo, error) { if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 || SwapFree < 0 { - return nil, fmt.Errorf("error getting system memory info %v\n", err) + return nil, fmt.Errorf("getting system memory info: %w", err) } meminfo := &MemInfo{} diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go b/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go index 3ce019dff..0f9feb1d2 100644 --- a/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go @@ -1,4 +1,8 @@ -// +build !linux,!windows,!solaris +//go:build !linux && !windows && !solaris && !(freebsd && cgo) +// +build !linux +// +build !windows +// +build !solaris +// +build !freebsd !cgo package system diff --git a/vendor/github.com/containers/storage/pkg/system/mknod.go b/vendor/github.com/containers/storage/pkg/system/mknod.go index c276ce8e8..d3d0ed8a1 100644 --- a/vendor/github.com/containers/storage/pkg/system/mknod.go +++ b/vendor/github.com/containers/storage/pkg/system/mknod.go @@ -1,3 +1,4 @@ +//go:build !windows && !freebsd // +build !windows,!freebsd package system @@ -8,8 +9,8 @@ import ( // Mknod creates a filesystem node (file, device special file or named pipe) named path // with attributes specified by mode and dev.
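A minimal consumer sketch for ReadMemInfo above (the MemInfo field names are taken from the hunk; on the FreeBSD code path all values are byte counts, page size times page counts):

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/system"
)

func main() {
	mi, err := system.ReadMemInfo()
	if err != nil {
		panic(err)
	}
	fmt.Printf("mem %d/%d bytes free, swap %d/%d bytes free\n",
		mi.MemFree, mi.MemTotal, mi.SwapFree, mi.SwapTotal)
}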
-func Mknod(path string, mode uint32, dev int) error { - return unix.Mknod(path, mode, dev) +func Mknod(path string, mode uint32, dev uint32) error { + return unix.Mknod(path, mode, int(dev)) } // Mkdev is used to build the value of linux devices (in /dev/) which specifies major diff --git a/vendor/github.com/containers/storage/pkg/system/mknod_freebsd.go b/vendor/github.com/containers/storage/pkg/system/mknod_freebsd.go index d09005589..53c3f2837 100644 --- a/vendor/github.com/containers/storage/pkg/system/mknod_freebsd.go +++ b/vendor/github.com/containers/storage/pkg/system/mknod_freebsd.go @@ -1,3 +1,4 @@ +//go:build freebsd // +build freebsd package system @@ -17,6 +18,6 @@ func Mknod(path string, mode uint32, dev uint64) error { // Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. // They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, // then the top 12 bits of the minor. -func Mkdev(major int64, minor int64) uint32 { - return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) +func Mkdev(major int64, minor int64) uint64 { + return uint64(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) } diff --git a/vendor/github.com/containers/storage/pkg/system/mknod_windows.go b/vendor/github.com/containers/storage/pkg/system/mknod_windows.go index 2e863c021..c35b1b346 100644 --- a/vendor/github.com/containers/storage/pkg/system/mknod_windows.go +++ b/vendor/github.com/containers/storage/pkg/system/mknod_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package system diff --git a/vendor/github.com/containers/storage/pkg/system/path_unix.go b/vendor/github.com/containers/storage/pkg/system/path_unix.go index f3762e69d..ff01143ee 100644 --- a/vendor/github.com/containers/storage/pkg/system/path_unix.go +++ b/vendor/github.com/containers/storage/pkg/system/path_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package system diff --git a/vendor/github.com/containers/storage/pkg/system/path_windows.go b/vendor/github.com/containers/storage/pkg/system/path_windows.go index aab891522..9f2509738 100644 --- a/vendor/github.com/containers/storage/pkg/system/path_windows.go +++ b/vendor/github.com/containers/storage/pkg/system/path_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package system @@ -21,13 +22,13 @@ import ( // d:\ --> Fail func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { if len(path) == 2 && string(path[1]) == ":" { - return "", fmt.Errorf("No relative path specified in %q", path) + return "", fmt.Errorf("relative path not specified in %q", path) } if !filepath.IsAbs(path) || len(path) < 2 { return filepath.FromSlash(path), nil } if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { - return "", fmt.Errorf("The specified path is not on the system drive (C:)") + return "", fmt.Errorf("specified path is not on the system drive (C:)") } return filepath.FromSlash(path[2:]), nil } diff --git a/vendor/github.com/containers/storage/pkg/system/process_unix.go b/vendor/github.com/containers/storage/pkg/system/process_unix.go index a9a0dd751..7ee59d926 100644 --- a/vendor/github.com/containers/storage/pkg/system/process_unix.go +++ b/vendor/github.com/containers/storage/pkg/system/process_unix.go @@ -1,3 +1,4 @@ +//go:build linux || freebsd || solaris || darwin // +build linux freebsd solaris darwin package system diff --git a/vendor/github.com/containers/storage/pkg/system/rm.go 
b/vendor/github.com/containers/storage/pkg/system/rm.go index 510e71428..60c7d8bd9 100644 --- a/vendor/github.com/containers/storage/pkg/system/rm.go +++ b/vendor/github.com/containers/storage/pkg/system/rm.go @@ -1,12 +1,13 @@ package system import ( + "errors" + "fmt" "os" "syscall" "time" "github.com/containers/storage/pkg/mount" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -40,6 +41,19 @@ func EnsureRemoveAll(dir string) error { return nil } + // If the RemoveAll fails with a permission error, we + // may have immutable files so try to remove the + // immutable flag and redo the RemoveAll. + if errors.Is(err, syscall.EPERM) { + if err = resetFileFlags(dir); err != nil { + return fmt.Errorf("resetting file flags: %w", err) + } + err = os.RemoveAll(dir) + if err == nil { + return nil + } + } + pe, ok := err.(*os.PathError) if !ok { return err @@ -62,12 +76,12 @@ func EnsureRemoveAll(dir string) error { continue } - if pe.Err != syscall.EBUSY { + if !IsEBUSY(pe.Err) { return err } if e := mount.Unmount(pe.Path); e != nil { - return errors.Wrapf(e, "error while removing %s", dir) + return fmt.Errorf("while removing %s: %w", dir, e) } if exitOnErr[pe.Path] == maxRetry { diff --git a/vendor/github.com/containers/storage/pkg/system/rm_common.go b/vendor/github.com/containers/storage/pkg/system/rm_common.go new file mode 100644 index 000000000..117eb1d6d --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/rm_common.go @@ -0,0 +1,10 @@ +//go:build !freebsd +// +build !freebsd + +package system + +// Reset file flags in a directory tree. This allows EnsureRemoveAll +// to delete trees which have the immutable flag set. +func resetFileFlags(dir string) error { + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/rm_freebsd.go b/vendor/github.com/containers/storage/pkg/system/rm_freebsd.go new file mode 100644 index 000000000..39a5de7aa --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/rm_freebsd.go @@ -0,0 +1,17 @@ +package system + +import ( + "io/fs" + "path/filepath" +) + +// Reset file flags in a directory tree. This allows EnsureRemoveAll +// to delete trees which have the immutable flag set. 
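A FreeBSD-only sketch of the behavior the rm.go hunk above adds (the paths are placeholders; Lchflags and UF_IMMUTABLE come from the lchflags_bsd.go hunk earlier in this patch):

package main

import "github.com/containers/storage/pkg/system"

func main() {
	// Mark a file immutable, then remove its tree: os.RemoveAll fails with
	// EPERM, so EnsureRemoveAll clears the file flags via resetFileFlags and
	// retries the removal.
	if err := system.Lchflags("/tmp/tree/locked", system.UF_IMMUTABLE); err != nil {
		panic(err)
	}
	if err := system.EnsureRemoveAll("/tmp/tree"); err != nil {
		panic(err)
	}
}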
+func resetFileFlags(dir string) error { + return filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error { + if err := Lchflags(path, 0); err != nil { + return err + } + return nil + }) +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_common.go b/vendor/github.com/containers/storage/pkg/system/stat_common.go new file mode 100644 index 000000000..e965c54c2 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/stat_common.go @@ -0,0 +1,13 @@ +//go:build !freebsd +// +build !freebsd + +package system + +type platformStatT struct { +} + +// Flags returns file flags if supported or zero otherwise +func (s StatT) Flags() uint32 { + _ = s.platformStatT // Silence warnings that StatT.platformStatT is unused (on these platforms) + return 0 +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go b/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go index 715f05b93..9c510468f 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go @@ -2,12 +2,25 @@ package system import "syscall" +type platformStatT struct { + flags uint32 +} + +// Flags returns file flags if supported or zero otherwise +func (s StatT) Flags() uint32 { + return s.flags +} + // fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, + st := &StatT{size: s.Size, mode: uint32(s.Mode), uid: s.Uid, gid: s.Gid, rdev: uint64(s.Rdev), - mtim: s.Mtimespec}, nil + mtim: s.Mtimespec, + dev: s.Dev} + st.flags = s.Flags + return st, nil } diff --git a/vendor/github.com/containers/storage/pkg/system/stat_linux.go b/vendor/github.com/containers/storage/pkg/system/stat_linux.go index af7af20fa..e5dcba822 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_linux.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_linux.go @@ -9,7 +9,8 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) { uid: s.Uid, gid: s.Gid, rdev: uint64(s.Rdev), - mtim: s.Mtim}, nil + mtim: s.Mtim, + dev: uint64(s.Dev)}, nil } // FromStatT converts a syscall.Stat_t type to a system.Stat_t type diff --git a/vendor/github.com/containers/storage/pkg/system/stat_unix.go b/vendor/github.com/containers/storage/pkg/system/stat_unix.go index 2fac918bf..47ae899f8 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_unix.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package system @@ -17,6 +18,8 @@ type StatT struct { rdev uint64 size int64 mtim syscall.Timespec + dev uint64 + platformStatT } // Mode returns file's permission mode. @@ -49,6 +52,11 @@ func (s StatT) Mtim() syscall.Timespec { return s.mtim } +// Dev returns a unique identifier for owning filesystem +func (s StatT) Dev() uint64 { + return s.dev +} + // Stat takes a path to a file and returns // a system.StatT type pertaining to that file. // diff --git a/vendor/github.com/containers/storage/pkg/system/stat_windows.go b/vendor/github.com/containers/storage/pkg/system/stat_windows.go index d30636052..81edaadbb 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_windows.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_windows.go @@ -11,6 +11,7 @@ type StatT struct { mode os.FileMode size int64 mtim time.Time + platformStatT } // Size returns file's size.
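The new Dev accessor makes filesystem-boundary checks straightforward; a sketch, assuming the Stat(path) (*StatT, error) signature this package documents (the paths are placeholders):

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/system"
)

func main() {
	a, err := system.Stat("/var/lib/containers")
	if err != nil {
		panic(err)
	}
	b, err := system.Stat("/var/lib/containers/storage")
	if err != nil {
		panic(err)
	}
	// Equal Dev() values mean the two paths live on the same filesystem.
	fmt.Println("same filesystem:", a.Dev() == b.Dev())
}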
@@ -42,6 +43,11 @@ func (s StatT) GID() uint32 { return 0 } +// Dev returns a unique identifier for owning filesystem +func (s StatT) Dev() uint64 { + return 0 +} + // Stat takes a path to a file and returns // a system.StatT type pertaining to that file. // diff --git a/vendor/github.com/containers/storage/pkg/system/syscall_unix.go b/vendor/github.com/containers/storage/pkg/system/syscall_unix.go index 49dbdd378..c4816c133 100644 --- a/vendor/github.com/containers/storage/pkg/system/syscall_unix.go +++ b/vendor/github.com/containers/storage/pkg/system/syscall_unix.go @@ -1,8 +1,13 @@ -// +build linux freebsd +//go:build linux || freebsd || darwin +// +build linux freebsd darwin package system -import "golang.org/x/sys/unix" +import ( + "errors" + + "golang.org/x/sys/unix" +) // Unmount is a platform-specific helper function to call // the unmount syscall. @@ -15,3 +20,8 @@ func Unmount(dest string) error { func CommandLineToArgv(commandLine string) ([]string, error) { return []string{commandLine}, nil } + +// IsEBUSY checks if the specified error is EBUSY. +func IsEBUSY(err error) bool { + return errors.Is(err, unix.EBUSY) +} diff --git a/vendor/github.com/containers/storage/pkg/system/syscall_windows.go b/vendor/github.com/containers/storage/pkg/system/syscall_windows.go index 23e9b207c..f4d8692cd 100644 --- a/vendor/github.com/containers/storage/pkg/system/syscall_windows.go +++ b/vendor/github.com/containers/storage/pkg/system/syscall_windows.go @@ -120,3 +120,8 @@ func HasWin32KSupport() bool { // APIs. return ntuserApiset.Load() == nil } + +// IsEBUSY checks if the specified error is EBUSY. +func IsEBUSY(err error) bool { + return false +} diff --git a/vendor/github.com/containers/storage/pkg/system/umask.go b/vendor/github.com/containers/storage/pkg/system/umask.go index 5a10eda5a..ad0337db7 100644 --- a/vendor/github.com/containers/storage/pkg/system/umask.go +++ b/vendor/github.com/containers/storage/pkg/system/umask.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package system diff --git a/vendor/github.com/containers/storage/pkg/system/umask_windows.go b/vendor/github.com/containers/storage/pkg/system/umask_windows.go index 13f1de176..9497596a0 100644 --- a/vendor/github.com/containers/storage/pkg/system/umask_windows.go +++ b/vendor/github.com/containers/storage/pkg/system/umask_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package system diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go b/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go index 6a7752437..edc588a63 100644 --- a/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go +++ b/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go @@ -10,13 +10,14 @@ import ( // LUtimesNano is used to change access and modification time of the specified path. // It's used for symlink files because unix.UtimesNano doesn't support a NOFOLLOW flag at the moment.
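Why errors.Is rather than ==: the EBUSY from a failed unmount usually arrives wrapped, which a plain comparison misses. A sketch for a unix build:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/system"
	"golang.org/x/sys/unix"
)

func main() {
	wrapped := fmt.Errorf("unmounting /some/dir: %w", unix.EBUSY)
	// errors.Is unwraps, so IsEBUSY still recognizes the underlying errno.
	fmt.Println(system.IsEBUSY(wrapped)) // true
	fmt.Println(wrapped == unix.EBUSY)   // false: wrapping breaks ==
}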
func LUtimesNano(path string, ts []syscall.Timespec) error { + atFdCwd := unix.AT_FDCWD + var _path *byte _path, err := unix.BytePtrFromString(path) if err != nil { return err } - - if _, _, err := unix.Syscall(unix.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != unix.ENOSYS { + if _, _, err := unix.Syscall6(unix.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), unix.AT_SYMLINK_NOFOLLOW, 0, 0); err != 0 && err != unix.ENOSYS { return err } diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go b/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go index 139714544..843ecdc53 100644 --- a/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux && !freebsd // +build !linux,!freebsd package system diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go b/vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go new file mode 100644 index 000000000..75275b964 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go @@ -0,0 +1,84 @@ +package system + +import ( + "bytes" + "os" + + "golang.org/x/sys/unix" +) + +const ( + // Value is larger than the maximum size allowed + E2BIG unix.Errno = unix.E2BIG + + // Operation not supported + EOPNOTSUPP unix.Errno = unix.EOPNOTSUPP +) + +// Lgetxattr retrieves the value of the extended attribute identified by attr +// and associated with the given path in the file system. +// Returns a []byte slice if the xattr is set and nil otherwise. +func Lgetxattr(path string, attr string) ([]byte, error) { + // Start with a 128-byte buffer + dest := make([]byte, 128) + sz, errno := unix.Lgetxattr(path, attr, dest) + + for errno == unix.ERANGE { + // Buffer too small, use zero-sized buffer to get the actual size + sz, errno = unix.Lgetxattr(path, attr, []byte{}) + if errno != nil { + return nil, &os.PathError{Op: "lgetxattr", Path: path, Err: errno} + } + dest = make([]byte, sz) + sz, errno = unix.Lgetxattr(path, attr, dest) + } + + switch { + case errno == unix.ENOATTR: + return nil, nil + case errno != nil: + return nil, &os.PathError{Op: "lgetxattr", Path: path, Err: errno} + } + + return dest[:sz], nil +} + +// Lsetxattr sets the value of the extended attribute identified by attr +// and associated with the given path in the file system. +func Lsetxattr(path string, attr string, data []byte, flags int) error { + if err := unix.Lsetxattr(path, attr, data, flags); err != nil { + return &os.PathError{Op: "lsetxattr", Path: path, Err: err} + } + + return nil +} + +// Llistxattr lists extended attributes associated with the given path +// in the file system.
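Callers of the darwin Lgetxattr above get nil, not an error, for an unset attribute; a usage sketch (the path and attribute name are assumed examples):

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/system"
)

func main() {
	val, err := system.Lgetxattr("/tmp/file", "com.example.checksum")
	if err != nil {
		panic(err)
	}
	if val == nil {
		fmt.Println("attribute not set")
		return
	}
	fmt.Printf("checksum: %s\n", val)
}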
+func Llistxattr(path string) ([]string, error) { + dest := make([]byte, 128) + sz, errno := unix.Llistxattr(path, dest) + + for errno == unix.ERANGE { + // Buffer too small, use zero-sized buffer to get the actual size + sz, errno = unix.Llistxattr(path, []byte{}) + if errno != nil { + return nil, &os.PathError{Op: "llistxattr", Path: path, Err: errno} + } + + dest = make([]byte, sz) + sz, errno = unix.Llistxattr(path, dest) + } + if errno != nil { + return nil, &os.PathError{Op: "llistxattr", Path: path, Err: errno} + } + + var attrs []string + for _, token := range bytes.Split(dest[:sz], []byte{0}) { + if len(token) > 0 { + attrs = append(attrs, string(token)) + } + } + + return attrs, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go index 10355848b..6b47c4e71 100644 --- a/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go +++ b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go @@ -13,6 +13,9 @@ const ( // Operation not supported EOPNOTSUPP unix.Errno = unix.EOPNOTSUPP + + // Value is too small or too large for maximum size allowed + EOVERFLOW unix.Errno = unix.EOVERFLOW ) // Lgetxattr retrieves the value of the extended attribute identified by attr diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go index bc8b8e3a5..8bd7acf1f 100644 --- a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go @@ -1,4 +1,5 @@ -// +build !linux +//go:build !linux && !darwin +// +build !linux,!darwin package system @@ -10,6 +11,9 @@ const ( // Operation not supported EOPNOTSUPP syscall.Errno = syscall.Errno(0) + + // Value is too small or too large for maximum size allowed + EOVERFLOW syscall.Errno = syscall.Errno(0) ) // Lgetxattr is not supported on platforms other than linux. diff --git a/vendor/github.com/containers/storage/pkg/tarlog/tarlogger.go b/vendor/github.com/containers/storage/pkg/tarlog/tarlogger.go index 26cd8504c..674e0a0ba 100644 --- a/vendor/github.com/containers/storage/pkg/tarlog/tarlogger.go +++ b/vendor/github.com/containers/storage/pkg/tarlog/tarlogger.go @@ -34,7 +34,7 @@ func NewLogger(logger func(*tar.Header)) (io.WriteCloser, error) { } // Make sure to avoid writes after the reader has been closed. if err := reader.Close(); err != nil { - logrus.Errorf("error closing tarlogger reader: %v", err) + logrus.Errorf("Closing tarlogger reader: %v", err) } // Unblock the Close(). t.closeMutex.Unlock() diff --git a/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go b/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go index 74776e65e..8f0732651 100644 --- a/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go +++ b/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go @@ -14,7 +14,7 @@ import ( var ( // ErrEmptyPrefix is an error returned if the prefix was empty. - ErrEmptyPrefix = errors.New("Prefix can't be empty") + ErrEmptyPrefix = errors.New("prefix can't be empty") // ErrIllegalChar is returned when a space is in the ID ErrIllegalChar = errors.New("illegal character: ' '") @@ -25,7 +25,7 @@ var ( // ErrAmbiguousPrefix is returned if the prefix was ambiguous // (multiple ids for the prefix). 
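tarlog.NewLogger, touched above, invokes a callback per tar header while the archive bytes stream through the returned writer; a sketch (the archive name is a placeholder):

package main

import (
	"archive/tar"
	"fmt"
	"io"
	"os"

	"github.com/containers/storage/pkg/tarlog"
)

func main() {
	w, err := tarlog.NewLogger(func(h *tar.Header) {
		fmt.Println("entry:", h.Name)
	})
	if err != nil {
		panic(err)
	}
	f, err := os.Open("layer.tar")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	if _, err := io.Copy(w, f); err != nil {
		panic(err)
	}
	w.Close() // releases the internal reader
}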
-type ErrAmbiguousPrefix struct { +type ErrAmbiguousPrefix struct { //nolint: errname prefix string } @@ -42,6 +42,7 @@ type TruncIndex struct { } // NewTruncIndex creates a new TruncIndex and initializes it with a list of IDs. +// Invalid IDs are _silently_ ignored. func NewTruncIndex(ids []string) (idx *TruncIndex) { idx = &TruncIndex{ ids: make(map[string]struct{}), @@ -51,7 +52,7 @@ func NewTruncIndex(ids []string) (idx *TruncIndex) { trie: patricia.NewTrie(patricia.MaxPrefixPerNode(64)), } for _, id := range ids { - idx.addID(id) + _ = idx.addID(id) // Ignore invalid IDs. Duplicate IDs are not a problem. } return } @@ -132,7 +133,8 @@ func (idx *TruncIndex) Get(s string) (string, error) { func (idx *TruncIndex) Iterate(handler func(id string)) { idx.Lock() defer idx.Unlock() - idx.trie.Visit(func(prefix patricia.Prefix, item patricia.Item) error { + // Ignore the error from Visit: it can only fail if the provided visitor fails, and ours never does. + _ = idx.trie.Visit(func(prefix patricia.Prefix, item patricia.Item) error { handler(string(prefix)) return nil }) diff --git a/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_cgo.go b/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_cgo.go index 4f441c32c..08dbc661d 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_cgo.go +++ b/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_cgo.go @@ -1,3 +1,4 @@ +//go:build linux && cgo // +build linux,cgo package unshare diff --git a/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_nocgo.go b/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_nocgo.go index a5005403a..25054810a 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_nocgo.go +++ b/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_nocgo.go @@ -1,3 +1,4 @@ +//go:build linux && !cgo // +build linux,!cgo package unshare diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare.c b/vendor/github.com/containers/storage/pkg/unshare/unshare.c index c0e359b27..f5a7c3a25 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare.c +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare.c @@ -1,4 +1,4 @@ -#ifndef UNSHARE_NO_CODE_AT_ALL +#if !defined(UNSHARE_NO_CODE_AT_ALL) && defined(__linux__) #define _GNU_SOURCE #include diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare.go b/vendor/github.com/containers/storage/pkg/unshare/unshare.go index 53cfeb0ec..00f397f35 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare.go +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare.go @@ -5,19 +5,12 @@ import ( "os" "os/user" "sync" - - "github.com/pkg/errors" - "github.com/syndtr/gocapability/capability" ) var ( homeDirOnce sync.Once homeDirErr error homeDir string - - hasCapSysAdminOnce sync.Once - hasCapSysAdminRet bool - hasCapSysAdminErr error ) // HomeDir returns the home directory for the current user. @@ -27,7 +20,7 @@ func HomeDir() (string, error) { if home == "" { usr, err := user.LookupId(fmt.Sprintf("%d", GetRootlessUID())) if err != nil { - homeDir, homeDirErr = "", errors.Wrapf(err, "unable to resolve HOME directory") + homeDir, homeDirErr = "", fmt.Errorf("unable to resolve HOME directory: %w", err) return } homeDir, homeDirErr = usr.HomeDir, nil @@ -37,20 +30,3 @@ func HomeDir() (string, error) { }) return homeDir, homeDirErr } - -// HasCapSysAdmin returns whether the current process has CAP_SYS_ADMIN.
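A usage sketch for TruncIndex resolution (the IDs are shortened, invented examples):

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/truncindex"
)

func main() {
	idx := truncindex.NewTruncIndex([]string{
		"49d5c7943a1cfe7d6",
		"4932b6cd1039e06b5",
	})
	full, err := idx.Get("49d5") // a unique prefix resolves to the full ID
	if err != nil {
		panic(err)
	}
	fmt.Println(full)
	if _, err := idx.Get("49"); err != nil {
		fmt.Println(err) // both IDs share "49", so this is ErrAmbiguousPrefix
	}
}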
-func HasCapSysAdmin() (bool, error) { - hasCapSysAdminOnce.Do(func() { - currentCaps, err := capability.NewPid2(0) - if err != nil { - hasCapSysAdminErr = err - return - } - if err = currentCaps.Load(); err != nil { - hasCapSysAdminErr = err - return - } - hasCapSysAdminRet = currentCaps.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) - }) - return hasCapSysAdminRet, hasCapSysAdminErr -} diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go index b3f8099f6..fbfb90d59 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go @@ -1,10 +1,11 @@ -// +build linux,cgo,!gccgo +//go:build (linux && cgo && !gccgo) || (freebsd && cgo) +// +build linux,cgo,!gccgo freebsd,cgo package unshare // #cgo CFLAGS: -Wall // extern void _containers_unshare(void); -// void __attribute__((constructor)) init(void) { +// static void __attribute__((constructor)) init(void) { // _containers_unshare(); // } import "C" diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go new file mode 100644 index 000000000..86ac12eca --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go @@ -0,0 +1,54 @@ +//go:build darwin +// +build darwin + +package unshare + +import ( + "os" + + "github.com/containers/storage/pkg/idtools" + "github.com/opencontainers/runtime-spec/specs-go" +) + +const ( + // UsernsEnvName is the environment variable that, if set, indicates rootless mode + UsernsEnvName = "_CONTAINERS_USERNS_CONFIGURED" +) + +// IsRootless tells us if we are running in rootless mode +func IsRootless() bool { + return true +} + +// GetRootlessUID returns the UID of the user in the parent userNS +func GetRootlessUID() int { + return os.Getuid() +} + +// RootlessEnv returns the environment settings for the rootless containers +func RootlessEnv() []string { + return append(os.Environ(), UsernsEnvName+"=") +} + +// MaybeReexecUsingUserNamespace re-execs the process in a new namespace +func MaybeReexecUsingUserNamespace(evenForRoot bool) { +} + +// GetHostIDMappings reads mappings for the specified process (or the current +// process if pid is "self" or an empty string) from the kernel. +func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) { + return nil, nil, nil +} + +// ParseIDMappings parses mapping triples.
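Before the definition lands just below, a usage sketch for ParseIDMappings; the triple format (container-ID:host-ID:size) follows idtools.ParseIDMap:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/unshare"
)

func main() {
	// Map container IDs 0..65535 onto host IDs starting at 100000.
	uids, gids, err := unshare.ParseIDMappings(
		[]string{"0:100000:65536"},
		[]string{"0:100000:65536"},
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(uids), len(gids)) // 1 1
}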
+func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) { + uid, err := idtools.ParseIDMap(uidmap, "userns-uid-map") + if err != nil { + return nil, nil, err + } + gid, err := idtools.ParseIDMap(gidmap, "userns-gid-map") + if err != nil { + return nil, nil, err + } + return uid, gid, nil +} diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.c b/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.c new file mode 100644 index 000000000..0b2f17886 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.c @@ -0,0 +1,76 @@ +#if !defined(UNSHARE_NO_CODE_AT_ALL) && defined(__FreeBSD__) + + +#include <sys/ioctl.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <termios.h> +#include <unistd.h> + +static int _containers_unshare_parse_envint(const char *envname) { + char *p, *q; + long l; + + p = getenv(envname); + if (p == NULL) { + return -1; + } + q = NULL; + l = strtol(p, &q, 10); + if ((q == NULL) || (*q != '\0')) { + fprintf(stderr, "Error parsing \"%s\"=\"%s\"!\n", envname, p); + _exit(1); + } + unsetenv(envname); + return l; +} + +void _containers_unshare(void) +{ + int pidfd, continuefd, n, pgrp, sid, ctty; + char buf[2048]; + + pidfd = _containers_unshare_parse_envint("_Containers-pid-pipe"); + if (pidfd != -1) { + snprintf(buf, sizeof(buf), "%llu", (unsigned long long) getpid()); + size_t size = write(pidfd, buf, strlen(buf)); + if (size != strlen(buf)) { + fprintf(stderr, "Error writing PID to pipe on fd %d: %m\n", pidfd); + _exit(1); + } + close(pidfd); + } + continuefd = _containers_unshare_parse_envint("_Containers-continue-pipe"); + if (continuefd != -1) { + n = read(continuefd, buf, sizeof(buf)); + if (n > 0) { + fprintf(stderr, "Error: %.*s\n", n, buf); + _exit(1); + } + close(continuefd); + } + sid = _containers_unshare_parse_envint("_Containers-setsid"); + if (sid == 1) { + if (setsid() == -1) { + fprintf(stderr, "Error during setsid: %m\n"); + _exit(1); + } + } + pgrp = _containers_unshare_parse_envint("_Containers-setpgrp"); + if (pgrp == 1) { + if (setpgrp(0, 0) == -1) { + fprintf(stderr, "Error during setpgrp: %m\n"); + _exit(1); + } + } + ctty = _containers_unshare_parse_envint("_Containers-ctty"); + if (ctty != -1) { + if (ioctl(ctty, TIOCSCTTY, 0) == -1) { + fprintf(stderr, "Error while setting controlling terminal to %d: %m\n", ctty); + _exit(1); + } + } +} + +#endif diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go new file mode 100644 index 000000000..f52760abb --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go @@ -0,0 +1,179 @@ +//go:build freebsd +// +build freebsd + +package unshare + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "os/exec" + "runtime" + "strconv" + "syscall" + + "github.com/containers/storage/pkg/reexec" + "github.com/sirupsen/logrus" +) + +// Cmd wraps an exec.Cmd created by the reexec package in unshare(), +// and one day might handle setting ID maps and other related settings +// by triggering initialization code in the child. +type Cmd struct { + *exec.Cmd + Setsid bool + Setpgrp bool + Ctty *os.File + Hook func(pid int) error +} + +// Command creates a new Cmd which can be customized. +func Command(args ...string) *Cmd { + cmd := reexec.Command(args...)
+ return &Cmd{ + Cmd: cmd, + } +} + +func (c *Cmd) Start() error { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + // Set environment variables to tell the child to synchronize its startup. + if c.Env == nil { + c.Env = os.Environ() + } + + // Create the pipe for reading the child's PID. + pidRead, pidWrite, err := os.Pipe() + if err != nil { + return fmt.Errorf("creating pid pipe: %w", err) + } + c.Env = append(c.Env, fmt.Sprintf("_Containers-pid-pipe=%d", len(c.ExtraFiles)+3)) + c.ExtraFiles = append(c.ExtraFiles, pidWrite) + + // Create the pipe for letting the child know to proceed. + continueRead, continueWrite, err := os.Pipe() + if err != nil { + pidRead.Close() + pidWrite.Close() + return fmt.Errorf("creating continue pipe: %w", err) + } + c.Env = append(c.Env, fmt.Sprintf("_Containers-continue-pipe=%d", len(c.ExtraFiles)+3)) + c.ExtraFiles = append(c.ExtraFiles, continueRead) + + // Pass along other instructions. + if c.Setsid { + c.Env = append(c.Env, "_Containers-setsid=1") + } + if c.Setpgrp { + c.Env = append(c.Env, "_Containers-setpgrp=1") + } + if c.Ctty != nil { + c.Env = append(c.Env, fmt.Sprintf("_Containers-ctty=%d", len(c.ExtraFiles)+3)) + c.ExtraFiles = append(c.ExtraFiles, c.Ctty) + } + + // Make sure we clean up our pipes. + defer func() { + if pidRead != nil { + pidRead.Close() + } + if pidWrite != nil { + pidWrite.Close() + } + if continueRead != nil { + continueRead.Close() + } + if continueWrite != nil { + continueWrite.Close() + } + }() + + // Start the new process. + err = c.Cmd.Start() + if err != nil { + return err + } + + // Close the ends of the pipes that the parent doesn't need. + continueRead.Close() + continueRead = nil + pidWrite.Close() + pidWrite = nil + + // Read the child's PID from the pipe. + pidString := "" + b := new(bytes.Buffer) + if _, err := io.Copy(b, pidRead); err != nil { + return fmt.Errorf("reading child PID: %w", err) + } + pidString = b.String() + pid, err := strconv.Atoi(pidString) + if err != nil { + fmt.Fprintf(continueWrite, "error parsing PID %q: %v", pidString, err) + return fmt.Errorf("parsing PID %q: %w", pidString, err) + } + + // Run any additional setup that we want to do before the child starts running proper. + if c.Hook != nil { + if err = c.Hook(pid); err != nil { + fmt.Fprintf(continueWrite, "hook error: %v", err) + return err + } + } + + return nil +} + +func (c *Cmd) Run() error { + if err := c.Start(); err != nil { + return err + } + return c.Wait() +} + +func (c *Cmd) CombinedOutput() ([]byte, error) { + return nil, errors.New("unshare: CombinedOutput() not implemented") +} + +func (c *Cmd) Output() ([]byte, error) { + return nil, errors.New("unshare: Output() not implemented") +} + +type Runnable interface { + Run() error +} + +// ExecRunnable runs the specified unshare command, captures its exit status, +// and exits with the same status.
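Putting the FreeBSD Cmd together: a sketch in which "storage-helper" is a hypothetical entry point that would have to be registered via reexec.Register, and ExecRunnable is the helper defined just below:

package main

import "github.com/containers/storage/pkg/unshare"

func main() {
	cmd := unshare.Command("storage-helper")
	cmd.Setsid = true
	cmd.Hook = func(pid int) error {
		// Runs in the parent after the child has reported its PID and before
		// the child is told to continue; a natural place for per-PID setup.
		return nil
	}
	unshare.ExecRunnable(cmd, nil) // exits with the child's status
}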
+func ExecRunnable(cmd Runnable, cleanup func()) { + exit := func(status int) { + if cleanup != nil { + cleanup() + } + os.Exit(status) + } + if err := cmd.Run(); err != nil { + if exitError, ok := err.(*exec.ExitError); ok { + if exitError.ProcessState.Exited() { + if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok { + if waitStatus.Exited() { + logrus.Debugf("%v", exitError) + exit(waitStatus.ExitStatus()) + } + if waitStatus.Signaled() { + logrus.Debugf("%v", exitError) + exit(int(waitStatus.Signal()) + 128) + } + } + } + } + logrus.Errorf("%v", err) + logrus.Errorf("(Unable to determine exit status)") + exit(1) + } + exit(0) +} diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go index 2f95da7d8..21a43d38c 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go @@ -1,10 +1,11 @@ +//go:build linux && cgo && gccgo // +build linux,cgo,gccgo package unshare // #cgo CFLAGS: -Wall -Wextra // extern void _containers_unshare(void); -// void __attribute__((constructor)) init(void) { +// static void __attribute__((constructor)) init(void) { // _containers_unshare(); // } import "C" diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go index 96b857543..86922846e 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package unshare @@ -5,10 +6,12 @@ package unshare import ( "bufio" "bytes" + "errors" "fmt" "io" "os" "os/exec" + "os/signal" "os/user" "runtime" "strconv" @@ -19,7 +22,6 @@ import ( "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/reexec" "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/syndtr/gocapability/capability" ) @@ -75,6 +77,28 @@ func getRootlessGID() int { return os.Getegid() } +// IsSetID checks if specified path has correct FileMode (Setuid|SETGID) or the +// matching file capability +func IsSetID(path string, modeid os.FileMode, capid capability.Cap) (bool, error) { + info, err := os.Stat(path) + if err != nil { + return false, err + } + + mode := info.Mode() + if mode&modeid == modeid { + return true, nil + } + cap, err := capability.NewFile2(path) + if err != nil { + return false, err + } + if err := cap.Load(); err != nil { + return false, err + } + return cap.Get(capability.EFFECTIVE, capid), nil +} + func (c *Cmd) Start() error { runtime.LockOSThread() defer runtime.UnlockOSThread() @@ -95,7 +119,7 @@ func (c *Cmd) Start() error { // Create the pipe for reading the child's PID. 
pidRead, pidWrite, err := os.Pipe() if err != nil { - return errors.Wrapf(err, "error creating pid pipe") + return fmt.Errorf("creating pid pipe: %w", err) } c.Env = append(c.Env, fmt.Sprintf("_Containers-pid-pipe=%d", len(c.ExtraFiles)+3)) c.ExtraFiles = append(c.ExtraFiles, pidWrite) @@ -105,7 +129,7 @@ func (c *Cmd) Start() error { if err != nil { pidRead.Close() pidWrite.Close() - return errors.Wrapf(err, "error creating pid pipe") + return fmt.Errorf("creating continue pipe: %w", err) } c.Env = append(c.Env, fmt.Sprintf("_Containers-continue-pipe=%d", len(c.ExtraFiles)+3)) c.ExtraFiles = append(c.ExtraFiles, continueRead) @@ -154,13 +178,13 @@ func (c *Cmd) Start() error { pidString := "" b := new(bytes.Buffer) if _, err := io.Copy(b, pidRead); err != nil { - return errors.Wrapf(err, "error reading child PID") + return fmt.Errorf("reading child PID: %w", err) } pidString = b.String() pid, err := strconv.Atoi(pidString) if err != nil { fmt.Fprintf(continueWrite, "error parsing PID %q: %v", pidString, err) - return errors.Wrapf(err, "error parsing PID %q", pidString) + return fmt.Errorf("parsing PID %q: %w", pidString, err) } pidString = fmt.Sprintf("%d", pid) @@ -170,18 +194,18 @@ func (c *Cmd) Start() error { setgroups, err := os.OpenFile(fmt.Sprintf("/proc/%s/setgroups", pidString), os.O_TRUNC|os.O_WRONLY, 0) if err != nil { fmt.Fprintf(continueWrite, "error opening setgroups: %v", err) - return errors.Wrapf(err, "error opening /proc/%s/setgroups", pidString) + return fmt.Errorf("opening /proc/%s/setgroups: %w", pidString, err) } defer setgroups.Close() if c.GidMappingsEnableSetgroups { if _, err := fmt.Fprintf(setgroups, "allow"); err != nil { fmt.Fprintf(continueWrite, "error writing \"allow\" to setgroups: %v", err) - return errors.Wrapf(err, "error opening \"allow\" to /proc/%s/setgroups", pidString) + return fmt.Errorf("writing \"allow\" to /proc/%s/setgroups: %w", pidString, err) } } else { if _, err := fmt.Fprintf(setgroups, "deny"); err != nil { fmt.Fprintf(continueWrite, "error writing \"deny\" to setgroups: %v", err) - return errors.Wrapf(err, "error writing \"deny\" to /proc/%s/setgroups", pidString) + return fmt.Errorf("writing \"deny\" to /proc/%s/setgroups: %w", pidString, err) } } @@ -189,7 +213,7 @@ func (c *Cmd) Start() error { uidmap, gidmap, err := GetHostIDMappings("") if err != nil { fmt.Fprintf(continueWrite, "error reading ID mappings in parent: %v", err) - return errors.Wrapf(err, "error reading ID mappings in parent") + return fmt.Errorf("reading ID mappings in parent: %w", err) } if len(c.UidMappings) == 0 { c.UidMappings = uidmap @@ -214,16 +238,27 @@ func (c *Cmd) Start() error { gidmapSet := false // Set the GID map. if c.UseNewgidmap { - cmd := exec.Command("newgidmap", append([]string{pidString}, strings.Fields(strings.Replace(g.String(), "\n", " ", -1))...)...) + path, err := exec.LookPath("newgidmap") + if err != nil { + return fmt.Errorf("finding newgidmap: %w", err) + } + cmd := exec.Command(path, append([]string{pidString}, strings.Fields(strings.Replace(g.String(), "\n", " ", -1))...)...)
@@ -214,16 +238,27 @@ func (c *Cmd) Start() error {
 		gidmapSet := false
 		// Set the GID map.
 		if c.UseNewgidmap {
-			cmd := exec.Command("newgidmap", append([]string{pidString}, strings.Fields(strings.Replace(g.String(), "\n", " ", -1))...)...)
+			path, err := exec.LookPath("newgidmap")
+			if err != nil {
+				return fmt.Errorf("finding newgidmap: %w", err)
+			}
+			cmd := exec.Command(path, append([]string{pidString}, strings.Fields(strings.Replace(g.String(), "\n", " ", -1))...)...)
 			g.Reset()
 			cmd.Stdout = g
 			cmd.Stderr = g
-			err := cmd.Run()
-			if err == nil {
+			if err := cmd.Run(); err == nil {
 				gidmapSet = true
 			} else {
-				logrus.Warnf("error running newgidmap: %v: %s", err, g.String())
-				logrus.Warnf("falling back to single mapping")
+				logrus.Warnf("Error running newgidmap: %v: %s", err, g.String())
+				isSetgid, err := IsSetID(path, os.ModeSetgid, capability.CAP_SETGID)
+				if err != nil {
+					logrus.Warnf("Failed to check for setgid on %s: %v", path, err)
+				} else {
+					if !isSetgid {
+						logrus.Warnf("%s should be setgid or have filecaps setgid", path)
+					}
+				}
+				logrus.Warnf("Falling back to single mapping")
 				g.Reset()
 				g.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Getegid())))
 			}
@@ -233,23 +268,23 @@ func (c *Cmd) Start() error {
 			setgroups, err := os.OpenFile(fmt.Sprintf("/proc/%s/setgroups", pidString), os.O_TRUNC|os.O_WRONLY, 0)
 			if err != nil {
 				fmt.Fprintf(continueWrite, "error opening /proc/%s/setgroups: %v", pidString, err)
-				return errors.Wrapf(err, "error opening /proc/%s/setgroups", pidString)
+				return fmt.Errorf("opening /proc/%s/setgroups: %w", pidString, err)
 			}
 			defer setgroups.Close()
 			if _, err := fmt.Fprintf(setgroups, "deny"); err != nil {
 				fmt.Fprintf(continueWrite, "error writing 'deny' to /proc/%s/setgroups: %v", pidString, err)
-				return errors.Wrapf(err, "error writing 'deny' to /proc/%s/setgroups", pidString)
+				return fmt.Errorf("writing 'deny' to /proc/%s/setgroups: %w", pidString, err)
 			}
 		}
 		gidmap, err := os.OpenFile(fmt.Sprintf("/proc/%s/gid_map", pidString), os.O_TRUNC|os.O_WRONLY, 0)
 		if err != nil {
-			fmt.Fprintf(continueWrite, "error opening /proc/%s/gid_map: %v", pidString, err)
-			return errors.Wrapf(err, "error opening /proc/%s/gid_map", pidString)
+			fmt.Fprintf(continueWrite, "opening /proc/%s/gid_map: %v", pidString, err)
+			return fmt.Errorf("opening /proc/%s/gid_map: %w", pidString, err)
 		}
 		defer gidmap.Close()
 		if _, err := fmt.Fprintf(gidmap, "%s", g.String()); err != nil {
-			fmt.Fprintf(continueWrite, "error writing %q to /proc/%s/gid_map: %v", g.String(), pidString, err)
-			return errors.Wrapf(err, "error writing %q to /proc/%s/gid_map", g.String(), pidString)
+			fmt.Fprintf(continueWrite, "writing %q to /proc/%s/gid_map: %v", g.String(), pidString, err)
+			return fmt.Errorf("writing %q to /proc/%s/gid_map: %w", g.String(), pidString, err)
 		}
 	}
 }
@@ -261,18 +296,30 @@ func (c *Cmd) Start() error {
 			fmt.Fprintf(u, "%d %d %d\n", m.ContainerID, m.HostID, m.Size)
 		}
 		uidmapSet := false
-		// Set the GID map.
+		// Set the UID map.
 		if c.UseNewuidmap {
-			cmd := exec.Command("newuidmap", append([]string{pidString}, strings.Fields(strings.Replace(u.String(), "\n", " ", -1))...)...)
+			path, err := exec.LookPath("newuidmap")
+			if err != nil {
+				return fmt.Errorf("finding newuidmap: %w", err)
+			}
+			cmd := exec.Command(path, append([]string{pidString}, strings.Fields(strings.Replace(u.String(), "\n", " ", -1))...)...)
u.Reset() cmd.Stdout = u cmd.Stderr = u - err := cmd.Run() - if err == nil { + if err := cmd.Run(); err == nil { uidmapSet = true } else { - logrus.Warnf("error running newuidmap: %v: %s", err, u.String()) - logrus.Warnf("falling back to single mapping") + logrus.Warnf("Error running newuidmap: %v: %s", err, u.String()) + isSetuid, err := IsSetID(path, os.ModeSetuid, capability.CAP_SETUID) + if err != nil { + logrus.Warnf("Failed to check for setuid on %s: %v", path, err) + } else { + if !isSetuid { + logrus.Warnf("%s should be setuid or have filecaps setuid", path) + } + } + + logrus.Warnf("Falling back to single mapping") u.Reset() u.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Geteuid()))) } @@ -281,12 +328,12 @@ func (c *Cmd) Start() error { uidmap, err := os.OpenFile(fmt.Sprintf("/proc/%s/uid_map", pidString), os.O_TRUNC|os.O_WRONLY, 0) if err != nil { fmt.Fprintf(continueWrite, "error opening /proc/%s/uid_map: %v", pidString, err) - return errors.Wrapf(err, "error opening /proc/%s/uid_map", pidString) + return fmt.Errorf("opening /proc/%s/uid_map: %w", pidString, err) } defer uidmap.Close() if _, err := fmt.Fprintf(uidmap, "%s", u.String()); err != nil { fmt.Fprintf(continueWrite, "error writing %q to /proc/%s/uid_map: %v", u.String(), pidString, err) - return errors.Wrapf(err, "error writing %q to /proc/%s/uid_map", u.String(), pidString) + return fmt.Errorf("writing %q to /proc/%s/uid_map: %w", u.String(), pidString, err) } } } @@ -296,12 +343,12 @@ func (c *Cmd) Start() error { oomScoreAdj, err := os.OpenFile(fmt.Sprintf("/proc/%s/oom_score_adj", pidString), os.O_TRUNC|os.O_WRONLY, 0) if err != nil { fmt.Fprintf(continueWrite, "error opening oom_score_adj: %v", err) - return errors.Wrapf(err, "error opening /proc/%s/oom_score_adj", pidString) + return fmt.Errorf("opening /proc/%s/oom_score_adj: %w", pidString, err) } defer oomScoreAdj.Close() if _, err := fmt.Fprintf(oomScoreAdj, "%d\n", *c.OOMScoreAdj); err != nil { fmt.Fprintf(continueWrite, "error writing \"%d\" to oom_score_adj: %v", c.OOMScoreAdj, err) - return errors.Wrapf(err, "error writing \"%d\" to /proc/%s/oom_score_adj", c.OOMScoreAdj, pidString) + return fmt.Errorf("writing \"%d\" to /proc/%s/oom_score_adj: %w", c.OOMScoreAdj, pidString, err) } } // Run any additional setup that we want to do before the child starts running proper. @@ -340,10 +387,47 @@ const ( UsernsEnvName = "_CONTAINERS_USERNS_CONFIGURED" ) +// hasFullUsersMappings checks whether the current user namespace has all the IDs mapped. +func hasFullUsersMappings() (bool, error) { + content, err := os.ReadFile("/proc/self/uid_map") + if err != nil { + return false, err + } + // The kernel rejects attempts to create mappings where either starting + // point is (u32)-1: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/kernel/user_namespace.c?id=af3e9579ecfb#n1006 . + // So, if the uid_map contains 4294967295, the entire IDs space is available in the + // user namespace, so it is likely the initial user namespace. 
+	return bytes.Contains(content, []byte("4294967295")), nil
+}
+
+var (
+	hasCapSysAdminOnce sync.Once
+	hasCapSysAdminRet  bool
+	hasCapSysAdminErr  error
+)
+
 // IsRootless tells us if we are running in rootless mode
 func IsRootless() bool {
 	isRootlessOnce.Do(func() {
 		isRootless = getRootlessUID() != 0 || getenv(UsernsEnvName) != ""
+		if !isRootless {
+			hasCapSysAdmin, err := HasCapSysAdmin()
+			if err != nil {
+				logrus.Warnf("Failed to read CAP_SYS_ADMIN presence for the current process")
+			}
+			if err == nil && !hasCapSysAdmin {
+				isRootless = true
+			}
+		}
+		if !isRootless {
+			hasMappings, err := hasFullUsersMappings()
+			if err != nil {
+				logrus.Warnf("Failed to read current user namespace mappings")
+			}
+			if err == nil && !hasMappings {
+				isRootless = true
+			}
+		}
 	})
 	return isRootless
 }
@@ -381,7 +465,7 @@ func bailOnError(err error, format string, a ...interface{}) { // nolint: golint
 // MaybeReexecUsingUserNamespace re-exec the process in a new namespace
 func MaybeReexecUsingUserNamespace(evenForRoot bool) {
 	// If we've already been through this once, no need to try again.
-	if os.Geteuid() == 0 && IsRootless() {
+	if os.Geteuid() == 0 && GetRootlessUID() > 0 {
 		return
 	}
@@ -407,7 +491,7 @@ func MaybeReexecUsingUserNamespace(evenForRoot bool) {
 	// ID and a range size.
 	uidmap, gidmap, err = GetSubIDMappings(me.Username, me.Username)
 	if err != nil {
-		logrus.Warnf("error reading allowed ID mappings: %v", err)
+		logrus.Warnf("Reading allowed ID mappings: %v", err)
 	}
 	if len(uidmap) == 0 {
 		logrus.Warnf("Found no UID ranges set aside for user %q in /etc/subuid.", me.Username)
@@ -434,13 +518,13 @@ func MaybeReexecUsingUserNamespace(evenForRoot bool) {
 		// If we have CAP_SYS_ADMIN, then we don't need to create a new namespace in order to be able
 		// to use unshare(), so don't bother creating a new user namespace at this point.
 		capabilities, err := capability.NewPid(0)
-		bailOnError(err, "error reading the current capabilities sets")
+		bailOnError(err, "Reading the current capabilities sets")
 		if capabilities.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) {
 			return
 		}
 		// Read the set of ID mappings that we're currently using.
 		uidmap, gidmap, err = GetHostIDMappings("")
-		bailOnError(err, "error reading current ID mappings")
+		bailOnError(err, "Reading current ID mappings")
 		// Just reuse them.
 		for i := range uidmap {
 			uidmap[i].HostID = uidmap[i].ContainerID
@@ -463,7 +547,7 @@ func MaybeReexecUsingUserNamespace(evenForRoot bool) {
 	if _, present := os.LookupEnv("BUILDAH_ISOLATION"); !present {
-		if err = os.Setenv("BUILDAH_ISOLATION", "rootless"); err != nil {
+		if err := os.Setenv("BUILDAH_ISOLATION", "rootless"); err != nil {
-			logrus.Errorf("error setting BUILDAH_ISOLATION=rootless in environment: %v", err)
+			logrus.Errorf("Setting BUILDAH_ISOLATION=rootless in environment: %v", err)
 			os.Exit(1)
 		}
 	}
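With the two new heuristics above (CAP_SYS_ADMIN presence and uid_map coverage), IsRootless can now report true even for a process whose effective UID is 0, for instance UID 0 inside an unprivileged user namespace. A tiny hedged usage sketch; the printed messages are illustrative, not library output:

    package main

    import (
    	"fmt"

    	"github.com/containers/storage/pkg/unshare"
    )

    func main() {
    	// After this patch, a UID-0 process without CAP_SYS_ADMIN, or in a
    	// user namespace that does not map the full host ID range, also
    	// counts as rootless.
    	if unshare.IsRootless() {
    		fmt.Println("rootless mode: per-user storage locations apply")
    	} else {
    		fmt.Println("full root privileges")
    	}
    }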
@@ -483,7 +567,31 @@ func MaybeReexecUsingUserNamespace(evenForRoot bool) {
 	cmd.GidMappingsEnableSetgroups = true

 	// Finish up.
-	logrus.Debugf("running %+v with environment %+v, UID map %+v, and GID map %+v", cmd.Cmd.Args, os.Environ(), cmd.UidMappings, cmd.GidMappings)
+	logrus.Debugf("Running %+v with environment %+v, UID map %+v, and GID map %+v", cmd.Cmd.Args, os.Environ(), cmd.UidMappings, cmd.GidMappings)
+
+	// Forward SIGHUP, SIGINT, and SIGTERM to our child process.
+	interrupted := make(chan os.Signal, 100)
+	defer func() {
+		signal.Stop(interrupted)
+		close(interrupted)
+	}()
+	cmd.Hook = func(int) error {
+		go func() {
+			for receivedSignal := range interrupted {
+				cmd.Cmd.Process.Signal(receivedSignal)
+			}
+		}()
+		return nil
+	}
+	signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)
+
+	// Make sure our child process gets SIGKILLed if we exit, for whatever
+	// reason, before it does.
+	if cmd.Cmd.SysProcAttr == nil {
+		cmd.Cmd.SysProcAttr = &syscall.SysProcAttr{}
+	}
+	cmd.Cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL
+
 	ExecRunnable(cmd, nil)
 }
@@ -497,22 +605,22 @@ func ExecRunnable(cmd Runnable, cleanup func()) {
 		os.Exit(status)
 	}
 	if err := cmd.Run(); err != nil {
-		if exitError, ok := errors.Cause(err).(*exec.ExitError); ok {
+		if exitError, ok := err.(*exec.ExitError); ok {
 			if exitError.ProcessState.Exited() {
 				if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok {
 					if waitStatus.Exited() {
-						logrus.Errorf("%v", exitError)
+						logrus.Debugf("%v", exitError)
 						exit(waitStatus.ExitStatus())
 					}
 					if waitStatus.Signaled() {
-						logrus.Errorf("%v", exitError)
+						logrus.Debugf("%v", exitError)
 						exit(int(waitStatus.Signal()) + 128)
 					}
 				}
 			}
 		}
 		logrus.Errorf("%v", err)
-		logrus.Errorf("(unable to determine exit status)")
+		logrus.Errorf("(Unable to determine exit status)")
 		exit(1)
 	}
 	exit(0)
@@ -523,7 +631,7 @@ func getHostIDMappings(path string) ([]specs.LinuxIDMapping, error) {
 	var mappings []specs.LinuxIDMapping
 	f, err := os.Open(path)
 	if err != nil {
-		return nil, errors.Wrapf(err, "error reading ID mappings from %q", path)
+		return nil, fmt.Errorf("reading ID mappings from %q: %w", path, err)
 	}
 	defer f.Close()
 	scanner := bufio.NewScanner(f)
@@ -531,19 +639,19 @@ func getHostIDMappings(path string) ([]specs.LinuxIDMapping, error) {
 		line := scanner.Text()
 		fields := strings.Fields(line)
 		if len(fields) != 3 {
-			return nil, errors.Errorf("line %q from %q has %d fields, not 3", line, path, len(fields))
+			return nil, fmt.Errorf("line %q from %q has %d fields, not 3", line, path, len(fields))
 		}
 		cid, err := strconv.ParseUint(fields[0], 10, 32)
 		if err != nil {
-			return nil, errors.Wrapf(err, "error parsing container ID value %q from line %q in %q", fields[0], line, path)
+			return nil, fmt.Errorf("parsing container ID value %q from line %q in %q: %w", fields[0], line, path, err)
 		}
 		hid, err := strconv.ParseUint(fields[1], 10, 32)
 		if err != nil {
-			return nil, errors.Wrapf(err, "error parsing host ID value %q from line %q in %q", fields[1], line, path)
+			return nil, fmt.Errorf("parsing host ID value %q from line %q in %q: %w", fields[1], line, path, err)
 		}
 		size, err := strconv.ParseUint(fields[2], 10, 32)
 		if err != nil {
-			return nil, errors.Wrapf(err, "error parsing size value %q from line %q in %q", fields[2], line, path)
+			return nil, fmt.Errorf("parsing size value %q from line %q in %q: %w", fields[2], line, path, err)
 		}
 		mappings = append(mappings, specs.LinuxIDMapping{ContainerID: uint32(cid), HostID: uint32(hid), Size: uint32(size)})
 	}
@@ -571,7 +679,7 @@ func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMappi
 func GetSubIDMappings(user, group string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) {
 	mappings, err := idtools.NewIDMappings(user, group)
 	if err != nil {
-		return nil, nil, errors.Wrapf(err, "error reading subuid mappings for user %q and subgid mappings for group %q", user, group)
+		return nil, nil, fmt.Errorf("reading subuid mappings for user %q and subgid mappings for group %q: %w",
user, group, err) } var uidmap, gidmap []specs.LinuxIDMapping for _, m := range mappings.UIDs() { @@ -603,3 +711,20 @@ func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, } return uid, gid, nil } + +// HasCapSysAdmin returns whether the current process has CAP_SYS_ADMIN. +func HasCapSysAdmin() (bool, error) { + hasCapSysAdminOnce.Do(func() { + currentCaps, err := capability.NewPid2(0) + if err != nil { + hasCapSysAdminErr = err + return + } + if err = currentCaps.Load(); err != nil { + hasCapSysAdminErr = err + return + } + hasCapSysAdminRet = currentCaps.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) + }) + return hasCapSysAdminRet, hasCapSysAdminErr +} diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go index bf4d567b8..66dd54596 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go @@ -1,4 +1,5 @@ -// +build !linux +//go:build !linux && !darwin +// +build !linux,!darwin package unshare @@ -43,3 +44,8 @@ func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMappi func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) { return nil, nil, nil } + +// HasCapSysAdmin returns whether the current process has CAP_SYS_ADMIN. +func HasCapSysAdmin() (bool, error) { + return os.Geteuid() == 0, nil +} diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go index d5f2d22a8..a6b38eda8 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go @@ -1,4 +1,5 @@ -// +build !linux,cgo +//go:build cgo && !(linux || freebsd) +// +build cgo,!linux,!freebsd package unshare diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf index e70f4f018..6c419a952 100644 --- a/vendor/github.com/containers/storage/storage.conf +++ b/vendor/github.com/containers/storage/storage.conf @@ -1,5 +1,14 @@ -# This file is is the configuration file for all tools -# that use the containers/storage library. +# This file is the configuration file for all tools +# that use the containers/storage library. The storage.conf file +# overrides all other storage.conf files. Container engines using the +# container/storage library do not inherit fields from other storage.conf +# files. +# +# Note: The storage.conf file overrides other storage.conf files based on this precedence: +# /usr/containers/storage.conf +# /etc/containers/storage.conf +# $HOME/.config/containers/storage.conf +# $XDG_CONFIG_HOME/containers/storage.conf (If XDG_CONFIG_HOME is set) # See man 5 containers-storage.conf for more information # The "container storage" table contains all of the server options. 
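Because, as the header above says, engines do not inherit fields across these files, a more specific storage.conf completely replaces the system-wide one, so a per-user override must restate every field it needs. A minimal hypothetical ~/.config/containers/storage.conf for a rootless user (paths and UID are example values, not defaults shipped by the library):

    [storage]
    driver = "overlay"
    runroot = "/run/user/1000/containers"
    graphroot = "/home/user/.local/share/containers/storage"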
 [storage]
@@ -11,12 +20,22 @@ driver = "overlay"
 runroot = "/run/containers/storage"

 # Primary Read/Write location of container storage
+# When changing the graphroot location on an SELinux system, you must
+# ensure the labeling matches the default location's labels with the
+# following commands:
+# semanage fcontext -a -e /var/lib/containers/storage /NEWSTORAGEPATH
+# restorecon -R -v /NEWSTORAGEPATH
 graphroot = "/var/lib/containers/storage"
+

 # Storage path for rootless users
 #
 # rootless_storage_path = "$HOME/.local/share/containers/storage"

+# Transient store mode causes all container metadata to be saved in temporary storage
+# (i.e. runroot above). This is faster, but doesn't persist across reboots.
+# transient_store = true
+
 [storage.options]
 # Storage options to be passed to underlying storage drivers

@@ -25,6 +44,28 @@ graphroot = "/var/lib/containers/storage"
 additionalimagestores = [
 ]

+# Allows specification of how storage is populated when pulling images. This
+# option can speed up pulling of images compressed with the zstd:chunked
+# format. Containers/storage looks for files within images that are being
+# pulled from a container registry that were previously pulled to the host. It
+# can copy or create a hard link to an existing file when it finds one,
+# eliminating the need to pull it from the container registry. These options
+# can deduplicate pulled content and its on-disk storage, and can allow the
+# kernel to use less memory when running containers.

+# containers/storage supports three keys
+#   * enable_partial_images="true" | "false"
+#     Tells containers/storage to look for files previously pulled in storage
+#     rather than always pulling them from the container registry.
+#   * use_hard_links = "false" | "true"
+#     Tells containers/storage to use hard links rather than creating new files in
+#     the image, if an identical file already exists in storage.
+#   * ostree_repos = ""
+#     Tells containers/storage where an ostree repository exists that might have
+#     previously pulled content which can be used when attempting to avoid
+#     pulling content from the container registry.
+pull_options = {enable_partial_images = "false", use_hard_links = "false", ostree_repos=""}
+
 # Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of
 # a container, to the UIDs/GIDs as they should appear outside of the container,
 # and the length of the range of UIDs/GIDs. Additional mapped sets can be
@@ -69,6 +110,9 @@ additionalimagestores = [
 # and vfs drivers.
 #ignore_chown_errors = "false"

+# Inodes is used to set the maximum number of inodes for the container image.
+# inodes = ""
+
 # Path to a helper program to use for mounting the file system instead of mounting it
 # directly.
 #mount_program = "/usr/bin/fuse-overlayfs"
@@ -110,7 +154,7 @@ mountopt = "nodev"
 # future. When "force_mask" is set the original permission mask is stored in
 # the "user.containers.override_stat" xattr and the "mount_program" option must
 # be specified. Mount programs like "/usr/bin/fuse-overlayfs" present the
-# extended attribute permissions to processes within containers rather then the
+# extended attribute permissions to processes within containers rather than the
 # "force_mask" permissions.
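To make the force_mask/mount_program coupling above concrete, a hypothetical overlay section that stores everything as 0700 ("private") while preserving the real modes in the override xattr might be (illustrative values only):

    [storage.options.overlay]
    mount_program = "/usr/bin/fuse-overlayfs"
    force_mask = "private"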
 #
 # force_mask = ""
diff --git a/vendor/github.com/containers/storage/storage.conf-freebsd b/vendor/github.com/containers/storage/storage.conf-freebsd
new file mode 100644
index 000000000..34d80152c
--- /dev/null
+++ b/vendor/github.com/containers/storage/storage.conf-freebsd
@@ -0,0 +1,205 @@
+# This file is the configuration file for all tools
+# that use the containers/storage library. The storage.conf file
+# overrides all other storage.conf files. Container engines using the
+# containers/storage library do not inherit fields from other storage.conf
+# files.
+#
+# Note: The storage.conf file overrides other storage.conf files based on this precedence:
+#  /usr/local/share/containers/storage.conf
+#  /usr/local/etc/containers/storage.conf
+#  $HOME/.config/containers/storage.conf
+#  $XDG_CONFIG_HOME/containers/storage.conf (If XDG_CONFIG_HOME is set)
+# See man 5 containers-storage.conf for more information
+# The "container storage" table contains all of the server options.
+[storage]
+
+# Default storage driver. Must be set for proper operation.
+driver = "zfs"
+
+# Temporary storage location
+runroot = "/var/run/containers/storage"
+
+# Primary Read/Write location of container storage
+graphroot = "/var/db/containers/storage"
+
+
+# Storage path for rootless users
+#
+# rootless_storage_path = "$HOME/.local/share/containers/storage"
+
+[storage.options]
+# Storage options to be passed to underlying storage drivers
+
+# AdditionalImageStores is used to pass paths to additional Read/Only image stores
+# Must be a comma separated list.
+additionalimagestores = [
+]
+
+# Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of
+# a container, to the UIDs/GIDs as they should appear outside of the container,
+# and the length of the range of UIDs/GIDs. Additional mapped sets can be
+# listed and will be heeded by libraries, but there are limits to the number of
+# mappings which the kernel will allow when you later attempt to run a
+# container.
+#
+# remap-uids = 0:1668442479:65536
+# remap-gids = 0:1668442479:65536
+
+# Remap-User/Group is a user name which can be used to look up one or more UID/GID
+# ranges in the /etc/subuid or /etc/subgid file. Mappings are set up starting
+# with an in-container ID of 0 and then a host-level ID taken from the lowest
+# range that matches the specified name, and using the length of that range.
+# Additional ranges are then assigned, using the ranges which specify the
+# lowest host-level IDs first, to the lowest not-yet-mapped in-container ID,
+# until all of the entries have been used for maps.
+#
+# remap-user = "containers"
+# remap-group = "containers"
+
+# Root-auto-userns-user is a user name which can be used to look up one or more UID/GID
+# ranges in the /etc/subuid and /etc/subgid file. These ranges will be partitioned
+# to containers configured to automatically create a user namespace. Containers
+# configured to automatically create a user namespace can still overlap with containers
+# having an explicit mapping set.
+# This setting is ignored when running as rootless.
+# root-auto-userns-user = "storage"
+#
+# Auto-userns-min-size is the minimum size for a user namespace created automatically.
+# auto-userns-min-size=1024
+#
+# Auto-userns-max-size is the maximum size for a user namespace created automatically.
+# auto-userns-max-size=65536
+
+[storage.options.overlay]
+# ignore_chown_errors can be set to allow a non-privileged user running with
+# a single UID within a user namespace to run containers. The user can pull
+# and use any image, even those with multiple UIDs. Note multiple UIDs will be
+# squashed down to the default UID in the container. These images will have no
+# separation between the users in the container. Only supported for the overlay
+# and vfs drivers.
+#ignore_chown_errors = "false"
+
+# Inodes is used to set the maximum number of inodes for the container image.
+# inodes = ""
+
+# Path to a helper program to use for mounting the file system instead of mounting it
+# directly.
+#mount_program = "/usr/bin/fuse-overlayfs"
+
+# mountopt specifies a comma separated list of extra mount options
+mountopt = "nodev"
+
+# Set to skip a PRIVATE bind mount on the storage home directory.
+# skip_mount_home = "false"
+
+# Size is used to set a maximum size of the container image.
+# size = ""
+
+# ForceMask specifies the permissions mask that is used for new files and
+# directories.
+#
+# The values "shared" and "private" are accepted.
+# Octal permission masks are also accepted.
+#
+# "": No value specified.
+#    All files/directories get set with the permissions identified within the
+#    image.
+# "private": it is equivalent to 0700.
+#    All files/directories get set with 0700 permissions. The owner has rwx
+#    access to the files. No other users on the system can access the files.
+#    This setting could be used with network-based homedirs.
+# "shared": it is equivalent to 0755.
+#    The owner has rwx access to the files and everyone else can read, access
+#    and execute them. This setting is useful for sharing containers storage
+#    with other users. For instance, a store owned by root but shared
+#    with rootless users as an additional store.
+#    NOTE: All files within the image are made readable and executable by any
+#    user on the system. Even /etc/shadow within your image is now readable by
+#    any user.
+#
+# Octal: users can experiment with other octal permissions.
+#
+# Note: The force_mask flag is an experimental feature; it could change in the
+# future. When "force_mask" is set the original permission mask is stored in
+# the "user.containers.override_stat" xattr and the "mount_program" option must
+# be specified. Mount programs like "/usr/bin/fuse-overlayfs" present the
+# extended attribute permissions to processes within containers rather than the
+# "force_mask" permissions.
+#
+# force_mask = ""
+
+[storage.options.thinpool]
+# Storage Options for thinpool
+
+# autoextend_percent determines the amount by which pool needs to be
+# grown. This is specified in terms of % of pool size. So a value of 20 means
+# that when threshold is hit, pool will be grown by 20% of existing
+# pool size.
+# autoextend_percent = "20"
+
+# autoextend_threshold determines the pool extension threshold in terms
+# of percentage of pool size. For example, if threshold is 60, that means when
+# pool is 60% full, threshold has been hit.
+# autoextend_threshold = "80"
+
+# basesize specifies the size to use when creating the base device, which
+# limits the size of images and containers.
+# basesize = "10G"
+
+# blocksize specifies a custom blocksize to use for the thin pool.
+# blocksize="64k"
+
+# directlvm_device specifies a custom block storage device to use for the
+# thin pool. Required if you set up devicemapper.
+# directlvm_device = ""
+
+# directlvm_device_force wipes the device even if it already has a filesystem.
+# directlvm_device_force = "True"
+
+# fs specifies the filesystem type to use for the base device.
+# fs="xfs"
+
+# log_level sets the log level of devicemapper.
+# 0: LogLevelSuppress 0 (Default)
+# 2: LogLevelFatal
+# 3: LogLevelErr
+# 4: LogLevelWarn
+# 5: LogLevelNotice
+# 6: LogLevelInfo
+# 7: LogLevelDebug
+# log_level = "7"
+
+# min_free_space specifies the minimum free space percent in a thin pool required for
+# new device creation to succeed. Valid values are from 0% - 99%.
+# A value of 0% disables the check.
+# min_free_space = "10%"
+
+# mkfsarg specifies extra mkfs arguments to be used when creating the base
+# device.
+# mkfsarg = ""
+
+# metadata_size is used to set the `pvcreate --metadatasize` option when
+# creating thin devices. Default is 128k
+# metadata_size = ""
+
+# Size is used to set a maximum size of the container image.
+# size = ""
+
+# use_deferred_removal marks devicemapper block device for deferred removal.
+# If the thinpool is in use when the driver attempts to remove it, the driver
+# tells the kernel to remove it as soon as possible. Note this does not free
+# up the disk space; use deferred deletion to fully remove the thinpool.
+# use_deferred_removal = "True"
+
+# use_deferred_deletion marks thinpool device for deferred deletion.
+# If the device is busy when the driver attempts to delete it, the driver
+# will attempt to delete the device every 30 seconds until successful.
+# If the program using the driver exits, the driver will continue attempting
+# to clean up the next time the driver is used. Deferred deletion permanently
+# deletes the device and all data stored in the device will be lost.
+# use_deferred_deletion = "True"
+
+# xfs_nospace_max_retries specifies the maximum number of retries XFS should
+# attempt to complete IO when an ENOSPC (no space) error is returned by the
+# underlying storage device.
+# xfs_nospace_max_retries = "0"
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
index d6d547c64..d208e0bfa 100644
--- a/vendor/github.com/containers/storage/store.go
+++ b/vendor/github.com/containers/storage/store.go
@@ -2,9 +2,9 @@ package storage

 import (
 	"encoding/base64"
+	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"reflect"
@@ -20,14 +20,30 @@ import (
 	"github.com/containers/storage/pkg/directory"
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/containers/storage/pkg/ioutils"
+	"github.com/containers/storage/pkg/lockfile"
 	"github.com/containers/storage/pkg/parsers"
-	"github.com/containers/storage/pkg/stringid"
 	"github.com/containers/storage/pkg/stringutils"
+	"github.com/containers/storage/pkg/system"
 	"github.com/containers/storage/types"
 	"github.com/hashicorp/go-multierror"
 	digest "github.com/opencontainers/go-digest"
 	"github.com/opencontainers/selinux/go-selinux/label"
-	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
+)
+
+type updateNameOperation int
+
+const (
+	setNames updateNameOperation = iota
+	addNames
+	removeNames
+)
+
+const (
+	volatileFlag     = "Volatile"
+	mountLabelFlag   = "MountLabel"
+	processLabelFlag = "ProcessLabel"
+	mountOptsFlag    = "MountOpts"
 )

 var (
@@ -35,57 +51,27 @@ var (
 	storesLock sync.Mutex
 )

-// ROFileBasedStore wraps up the methods of the various types of file-based
-// data stores that we implement which are needed for both read-only and
-// read-write files.
-type ROFileBasedStore interface {
-	Locker
-
-	// Load reloads the contents of the store from disk. It should be called
-	// with the lock held.
-	Load() error
-
-	// ReloadIfChanged reloads the contents of the store from disk if it is changed.
- ReloadIfChanged() error -} - -// RWFileBasedStore wraps up the methods of various types of file-based data -// stores that we implement using read-write files. -type RWFileBasedStore interface { - // Save saves the contents of the store to disk. It should be called with - // the lock held, and Touch() should be called afterward before releasing the - // lock. - Save() error -} - -// FileBasedStore wraps up the common methods of various types of file-based -// data stores that we implement. -type FileBasedStore interface { - ROFileBasedStore - RWFileBasedStore -} - -// ROMetadataStore wraps a method for reading metadata associated with an ID. -type ROMetadataStore interface { +// roMetadataStore wraps a method for reading metadata associated with an ID. +type roMetadataStore interface { // Metadata reads metadata associated with an item with the specified ID. Metadata(id string) (string, error) } -// RWMetadataStore wraps a method for setting metadata associated with an ID. -type RWMetadataStore interface { +// rwMetadataStore wraps a method for setting metadata associated with an ID. +type rwMetadataStore interface { // SetMetadata updates the metadata associated with the item with the specified ID. SetMetadata(id, metadata string) error } -// MetadataStore wraps up methods for getting and setting metadata associated with IDs. -type MetadataStore interface { - ROMetadataStore - RWMetadataStore +// metadataStore wraps up methods for getting and setting metadata associated with IDs. +type metadataStore interface { + roMetadataStore + rwMetadataStore } -// An ROBigDataStore wraps up the read-only big-data related methods of the +// An roBigDataStore wraps up the read-only big-data related methods of the // various types of file-based lookaside stores that we implement. -type ROBigDataStore interface { +type roBigDataStore interface { // BigData retrieves a (potentially large) piece of data associated with // this ID, if it has previously been set. BigData(id, key string) ([]byte, error) @@ -103,8 +89,8 @@ type ROBigDataStore interface { BigDataNames(id string) ([]string, error) } -// A RWImageBigDataStore wraps up how we store big-data associated with images. -type RWImageBigDataStore interface { +// A rwImageBigDataStore wraps up how we store big-data associated with images. +type rwImageBigDataStore interface { // SetBigData stores a (potentially large) piece of data associated // with this ID. // Pass github.com/containers/image/manifest.Digest as digestManifest @@ -112,16 +98,16 @@ type RWImageBigDataStore interface { SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error } -// A ContainerBigDataStore wraps up how we store big-data associated with containers. -type ContainerBigDataStore interface { - ROBigDataStore +// A containerBigDataStore wraps up how we store big-data associated with containers. +type containerBigDataStore interface { + roBigDataStore // SetBigData stores a (potentially large) piece of data associated // with this ID. SetBigData(id, key string, data []byte) error } -// A ROLayerBigDataStore wraps up how we store RO big-data associated with layers. -type ROLayerBigDataStore interface { +// A roLayerBigDataStore wraps up how we store RO big-data associated with layers. +type roLayerBigDataStore interface { // SetBigData stores a (potentially large) piece of data associated // with this ID. 
BigData(id, key string) (io.ReadCloser, error) @@ -131,21 +117,15 @@ type ROLayerBigDataStore interface { BigDataNames(id string) ([]string, error) } -// A RWLayerBigDataStore wraps up how we store big-data associated with layers. -type RWLayerBigDataStore interface { +// A rwLayerBigDataStore wraps up how we store big-data associated with layers. +type rwLayerBigDataStore interface { // SetBigData stores a (potentially large) piece of data associated // with this ID. SetBigData(id, key string, data io.Reader) error } -// A LayerBigDataStore wraps up how we store big-data associated with layers. -type LayerBigDataStore interface { - ROLayerBigDataStore - RWLayerBigDataStore -} - -// A FlaggableStore can have flags set and cleared on items which it manages. -type FlaggableStore interface { +// A flaggableStore can have flags set and cleared on items which it manages. +type flaggableStore interface { // ClearFlag removes a named flag from an item in the store. ClearFlag(id string, flag string) error @@ -162,8 +142,10 @@ type Store interface { // settings that were passed to GetStore() when the object was created. RunRoot() string GraphRoot() string + TransientStore() bool GraphDriverName() string GraphOptions() []string + PullOptions() map[string]string UIDMap() []idtools.IDMap GIDMap() []idtools.IDMap @@ -265,6 +247,8 @@ type Store interface { // Unmount attempts to unmount an image, given an ID. // Returns whether or not the layer is still mounted. + // WARNING: The return value may already be obsolete by the time it is available + // to the caller, so it can be used for heuristic sanity checks at best. It should almost always be ignored. UnmountImage(id string, force bool) (bool, error) // Mount attempts to mount a layer, image, or container for access, and @@ -283,9 +267,15 @@ type Store interface { // Unmount attempts to unmount a layer, image, or container, given an ID, a // name, or a mount path. Returns whether or not the layer is still mounted. + // WARNING: The return value may already be obsolete by the time it is available + // to the caller, so it can be used for heuristic sanity checks at best. It should almost always be ignored. Unmount(id string, force bool) (bool, error) // Mounted returns number of times the layer has been mounted. + // + // WARNING: This value might already be obsolete by the time it is returned; + // In situations where concurrent mount/unmount attempts can happen, this field + // should not be used for any decisions, maybe apart from heuristic user warnings. Mounted(id string) (int, error) // Changes returns a summary of the changes which would need to be made @@ -367,8 +357,17 @@ type Store interface { // SetNames changes the list of names for a layer, image, or container. // Duplicate names are removed from the list automatically. + // Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. SetNames(id string, names []string) error + // AddNames adds the list of names for a layer, image, or container. + // Duplicate names are removed from the list automatically. + AddNames(id string, names []string) error + + // RemoveNames removes the list of names for a layer, image, or container. + // Duplicate names are removed from the list automatically. + RemoveNames(id string, names []string) error + // ListImageBigData retrieves a list of the (possibly large) chunks of // named data associated with an image. 
 	ListImageBigData(id string) ([]string, error)
@@ -392,7 +391,7 @@ type Store interface {
 	SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error

 	// ListLayerBigData retrieves a list of the (possibly large) chunks of
-	// named data associated with an layer.
+	// named data associated with a layer.
 	ListLayerBigData(id string) ([]string, error)

 	// LayerBigData retrieves a (possibly large) chunk of named data
@@ -513,6 +512,11 @@ type Store interface {
 	// Releasing AdditionalLayer handler is caller's responsibility.
 	// This API is experimental and can be changed without bumping the major version number.
 	LookupAdditionalLayer(d digest.Digest, imageref string) (AdditionalLayer, error)
+
+	// Tries to clean up remainders of previous containers or layers that are not
+	// referenced in the JSON files. These can happen in the case of unclean
+	// shutdowns or regular restarts in transient store mode.
+	GarbageCollect() error
 }

 // AdditionalLayer represents a layer that is contained in the additional layer store
@@ -547,6 +551,17 @@ type LayerOptions struct {
 	// initialize this layer. If set, it should be a child of the layer
 	// which we want to use as the parent of the new layer.
 	TemplateLayer string
+	// OriginalDigest specifies a digest of the tarstream (diff), if one is
+	// provided along with these LayerOptions, and reliably known by the caller.
+	// Use the default "" if this field is not applicable or the value is not known.
+	OriginalDigest digest.Digest
+	// UncompressedDigest specifies a digest of the uncompressed version (“DiffID”)
+	// of the tarstream (diff), if one is provided along with these LayerOptions,
+	// and reliably known by the caller.
+	// Use the default "" if this field is not applicable or the value is not known.
+	UncompressedDigest digest.Digest
+	// True if the layer info can be treated as volatile
+	Volatile bool
 }

 // ImageOptions is used for passing options to a Store's CreateImage() method.
@@ -565,35 +580,65 @@ type ContainerOptions struct {
 	// container's layer will inherit settings from the image's top layer
 	// or, if it is not being created based on an image, the Store object.
 	types.IDMappingOptions
-	LabelOpts []string
-	Flags     map[string]interface{}
-	MountOpts []string
-	Volatile  bool
+	LabelOpts  []string
+	Flags      map[string]interface{}
+	MountOpts  []string
+	Volatile   bool
+	StorageOpt map[string]string
 }

 type store struct {
-	lastLoaded      time.Time
-	runRoot         string
-	graphLock       Locker
-	usernsLock      Locker
+	// # Locking hierarchy:
+	// These locks do not all need to be held simultaneously, but if some code does need to lock more than one, it MUST do so in this order:
+	// - graphLock
+	// - layerStore.start{Reading,Writing}
+	// - roLayerStores[].startReading (in the order of the items of the roLayerStores array)
+	// - imageStore.start{Reading,Writing}
+	// - roImageStores[].startReading (in the order of the items of the roImageStores array)
+	// - containerStore.start{Reading,Writing}
+
+	// The following fields are only set when constructing store, and must never be modified afterwards.
+	// They are safe to access without any other locking.
+	runRoot             string
+	graphDriverName     string // Initially set to the user-requested value, possibly ""; updated during store construction, and does not change afterwards.
+	graphDriverPriority []string
+	// graphLock:
+	// - Ensures that we always reload graphDriver, and the primary layer store, after any process does store.Shutdown. This is necessary
+	//   because (??) the Shutdown may forcibly unmount and clean up, affecting graph driver state in a way only a graph driver
+	//   and layer store reinitialization can notice.
+	// - Ensures that store.Shutdown is exclusive with mount operations. This is necessary because some
+	//   graph drivers call mount.MakePrivate() during initialization, the mount operations require that, and the driver’s Cleanup() method
+	//   may undo that. So, holding graphLock is required throughout the duration of Shutdown(), and the duration of any mount
+	//   (but not unmount) calls.
+	// - Within this store object, protects access to some related in-memory state.
+	graphLock  *lockfile.LockFile
+	usernsLock *lockfile.LockFile
 	graphRoot       string
-	graphDriverName string
 	graphOptions    []string
+	pullOptions     map[string]string
 	uidMap          []idtools.IDMap
 	gidMap          []idtools.IDMap
 	autoUsernsUser  string
-	additionalUIDs  *idSet // Set by getAvailableIDs()
-	additionalGIDs  *idSet // Set by getAvailableIDs()
 	autoNsMinSize   uint32
 	autoNsMaxSize   uint32
-	graphDriver     drivers.Driver
-	layerStore      LayerStore
-	roLayerStores   []ROLayerStore
-	imageStore      ImageStore
-	roImageStores   []ROImageStore
-	containerStore  ContainerStore
+	imageStore      rwImageStore
+	roImageStores   []roImageStore
+	containerStore  rwContainerStore
 	digestLockRoot  string
 	disableVolatile bool
+	transientStore  bool
+
+	// The following fields can only be accessed with graphLock held.
+	graphLockLastWrite lockfile.LastWrite
+	// FIXME: This field is only set when holding graphLock, but locking rules of the driver
+	// interface itself are not documented here. It is extensively used without holding graphLock.
+	graphDriver             drivers.Driver
+	layerStoreUseGetters    rwLayerStore   // Almost all users should use the provided accessors instead of accessing this field directly.
+	roLayerStoresUseGetters []roLayerStore // Almost all users should use the provided accessors instead of accessing this field directly.
+
+	// FIXME: The following fields need locking, and don’t have it.
+	additionalUIDs *idSet // Set by getAvailableIDs()
+	additionalGIDs *idSet // Set by getAvailableIDs()
 }

 // GetStore attempts to find an already-created Store object matching the
@@ -603,19 +648,24 @@ type store struct {
 // If StoreOptions `options` haven't been fully populated, then DefaultStoreOptions are used.
 //
 // These defaults observe environment variables:
-// * `STORAGE_DRIVER` for the name of the storage driver to attempt to use
-// * `STORAGE_OPTS` for the string of options to pass to the driver
+//   - `STORAGE_DRIVER` for the name of the storage driver to attempt to use
+//   - `STORAGE_OPTS` for the string of options to pass to the driver
 //
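The rest of this doc comment (below) requires callers to run reexec.Init() before touching the store. A minimal hypothetical caller, using only identifiers visible in this patch (types.Options, GetStore, GraphRoot) plus the Store interface's Shutdown method:

    package main

    import (
    	"fmt"
    	"os"

    	"github.com/containers/storage"
    	"github.com/containers/storage/pkg/reexec"
    	"github.com/containers/storage/types"
    )

    func main() {
    	// Boilerplate described in the comment below: pkg/reexec runs part
    	// of the store's setup in a re-executed child process.
    	if reexec.Init() {
    		return
    	}
    	opts, err := types.Options() // defaults, honoring STORAGE_DRIVER/STORAGE_OPTS
    	if err != nil {
    		fmt.Fprintln(os.Stderr, err)
    		os.Exit(1)
    	}
    	store, err := storage.GetStore(opts)
    	if err != nil {
    		fmt.Fprintln(os.Stderr, err)
    		os.Exit(1)
    	}
    	defer store.Shutdown(false)
    	fmt.Println("graph root:", store.GraphRoot())
    }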
The calling process's // main() function needs to import our pkg/reexec package and should begin with // something like this in order to allow us to properly start that child // process: -// if reexec.Init() { -// return -// } +// +// if reexec.Init() { +// return +// } func GetStore(options types.StoreOptions) (Store, error) { + defaultOpts, err := types.Options() + if err != nil { + return nil, err + } if options.RunRoot == "" && options.GraphRoot == "" && options.GraphDriverName == "" && len(options.GraphDriverOptions) == 0 { - options = types.Options() + options = defaultOpts } if options.GraphRoot != "" { @@ -636,17 +686,21 @@ func GetStore(options types.StoreOptions) (Store, error) { storesLock.Lock() defer storesLock.Unlock() + // return if BOTH run and graph root are matched, otherwise our run-root can be overridden if the graph is found first for _, s := range stores { - if s.graphRoot == options.GraphRoot && (options.GraphDriverName == "" || s.graphDriverName == options.GraphDriverName) { + if (s.graphRoot == options.GraphRoot) && (s.runRoot == options.RunRoot) && (options.GraphDriverName == "" || s.graphDriverName == options.GraphDriverName) { return s, nil } } - if options.GraphRoot == "" { - return nil, errors.Wrap(ErrIncompleteOptions, "no storage root specified") - } - if options.RunRoot == "" { - return nil, errors.Wrap(ErrIncompleteOptions, "no storage runroot specified") + // if passed a run-root or graph-root alone, the other should be defaulted only error if we have neither. + switch { + case options.RunRoot == "" && options.GraphRoot == "": + return nil, fmt.Errorf("no storage runroot or graphroot specified: %w", ErrIncompleteOptions) + case options.GraphRoot == "": + options.GraphRoot = defaultOpts.GraphRoot + case options.RunRoot == "": + options.RunRoot = defaultOpts.RunRoot } if err := os.MkdirAll(options.RunRoot, 0700); err != nil { @@ -655,18 +709,16 @@ func GetStore(options types.StoreOptions) (Store, error) { if err := os.MkdirAll(options.GraphRoot, 0700); err != nil { return nil, err } - for _, subdir := range []string{"mounts", "tmp", options.GraphDriverName} { - if err := os.MkdirAll(filepath.Join(options.GraphRoot, subdir), 0700); err != nil { - return nil, err - } + if err := os.MkdirAll(filepath.Join(options.GraphRoot, options.GraphDriverName), 0700); err != nil { + return nil, err } - graphLock, err := GetLockfile(filepath.Join(options.GraphRoot, "storage.lock")) + graphLock, err := lockfile.GetLockFile(filepath.Join(options.GraphRoot, "storage.lock")) if err != nil { return nil, err } - usernsLock, err := GetLockfile(filepath.Join(options.GraphRoot, "userns.lock")) + usernsLock, err := lockfile.GetLockFile(filepath.Join(options.GraphRoot, "userns.lock")) if err != nil { return nil, err } @@ -680,20 +732,24 @@ func GetStore(options types.StoreOptions) (Store, error) { autoNsMaxSize = AutoUserNsMaxSize } s := &store{ - runRoot: options.RunRoot, - graphLock: graphLock, - graphRoot: options.GraphRoot, - graphDriverName: options.GraphDriverName, - graphOptions: options.GraphDriverOptions, - uidMap: copyIDMap(options.UIDMap), - gidMap: copyIDMap(options.GIDMap), - autoUsernsUser: options.RootAutoNsUser, - autoNsMinSize: autoNsMinSize, - autoNsMaxSize: autoNsMaxSize, - additionalUIDs: nil, - additionalGIDs: nil, - usernsLock: usernsLock, - disableVolatile: options.DisableVolatile, + runRoot: options.RunRoot, + graphDriverName: options.GraphDriverName, + graphDriverPriority: options.GraphDriverPriority, + graphLock: graphLock, + usernsLock: usernsLock, + 
graphRoot: options.GraphRoot, + graphOptions: options.GraphDriverOptions, + pullOptions: options.PullOptions, + uidMap: copyIDMap(options.UIDMap), + gidMap: copyIDMap(options.GIDMap), + autoUsernsUser: options.RootAutoNsUser, + autoNsMinSize: autoNsMinSize, + autoNsMaxSize: autoNsMaxSize, + disableVolatile: options.DisableVolatile, + transientStore: options.TransientStore, + + additionalUIDs: nil, + additionalGIDs: nil, } if err := s.load(); err != nil { return nil, err @@ -740,10 +796,22 @@ func (s *store) GraphRoot() string { return s.graphRoot } +func (s *store) TransientStore() bool { + return s.transientStore +} + func (s *store) GraphOptions() []string { return s.graphOptions } +func (s *store) PullOptions() map[string]string { + cp := make(map[string]string, len(s.pullOptions)) + for k, v := range s.pullOptions { + cp[k] = v + } + return cp +} + func (s *store) UIDMap() []idtools.IDMap { return copyIDMap(s.uidMap) } @@ -752,13 +820,27 @@ func (s *store) GIDMap() []idtools.IDMap { return copyIDMap(s.gidMap) } +// This must only be called when constructing store; it writes to fields that are assumed to be constant after constrution. func (s *store) load() error { - driver, err := s.GraphDriver() - if err != nil { + var driver drivers.Driver + if err := func() error { // A scope for defer + s.graphLock.Lock() + defer s.graphLock.Unlock() + lastWrite, err := s.graphLock.GetLastWrite() + if err != nil { + return err + } + s.graphLockLastWrite = lastWrite + driver, err = s.createGraphDriverLocked() + if err != nil { + return err + } + s.graphDriver = driver + s.graphDriverName = driver.String() + return nil + }(); err != nil { return err } - s.graphDriver = driver - s.graphDriverName = driver.String() driverPrefix := s.graphDriverName + "-" gipath := filepath.Join(s.graphRoot, driverPrefix+"images") @@ -770,22 +852,21 @@ func (s *store) load() error { return err } s.imageStore = ris - if _, err := s.ROImageStores(); err != nil { - return err - } gcpath := filepath.Join(s.graphRoot, driverPrefix+"containers") if err := os.MkdirAll(gcpath, 0700); err != nil { return err } - rcs, err := newContainerStore(gcpath) - if err != nil { - return err - } rcpath := filepath.Join(s.runRoot, driverPrefix+"containers") if err := os.MkdirAll(rcpath, 0700); err != nil { return err } + + rcs, err := newContainerStore(gcpath, rcpath, s.transientStore) + if err != nil { + return err + } + s.containerStore = rcs for _, store := range driver.AdditionalImageStores() { @@ -807,57 +888,86 @@ func (s *store) load() error { // GetDigestLock returns a digest-specific Locker. func (s *store) GetDigestLock(d digest.Digest) (Locker, error) { - return GetLockfile(filepath.Join(s.digestLockRoot, d.String())) + return lockfile.GetLockFile(filepath.Join(s.digestLockRoot, d.String())) } -func (s *store) getGraphDriver() (drivers.Driver, error) { - if s.graphDriver != nil { - return s.graphDriver, nil +// startUsingGraphDriver obtains s.graphLock and ensures that s.graphDriver is set and fresh. +// It only intended to be used on a fully-constructed store. +// If this succeeds, the caller MUST call stopUsingGraphDriver(). 
+func (s *store) startUsingGraphDriver() error { + s.graphLock.Lock() + succeeded := false + defer func() { + if !succeeded { + s.graphLock.Unlock() + } + }() + + lastWrite, modified, err := s.graphLock.ModifiedSince(s.graphLockLastWrite) + if err != nil { + return err } + if modified { + driver, err := s.createGraphDriverLocked() + if err != nil { + return err + } + // Our concurrency design requires s.graphDriverName not to be modified after + // store is constructed. + // It’s fine for driver.String() not to match the requested graph driver name + // (e.g. if the user asks for overlay2 and gets overlay), but it must be an idempotent + // mapping: + // driver1 := drivers.New(userInput, config) + // name1 := driver1.String() + // name2 := drivers.New(name1, config).String() + // assert(name1 == name2) + if s.graphDriverName != driver.String() { + return fmt.Errorf("graph driver name changed from %q to %q during reload", + s.graphDriverName, driver.String()) + } + s.graphDriver = driver + s.layerStoreUseGetters = nil + s.graphLockLastWrite = lastWrite + } + + succeeded = true + return nil +} + +// stopUsingGraphDriver releases graphLock obtained by startUsingGraphDriver. +func (s *store) stopUsingGraphDriver() { + s.graphLock.Unlock() +} + +// createGraphDriverLocked creates a new instance of graph driver for s, and returns it. +// Almost all users should use startUsingGraphDriver instead. +// The caller must hold s.graphLock. +func (s *store) createGraphDriverLocked() (drivers.Driver, error) { config := drivers.Options{ - Root: s.graphRoot, - RunRoot: s.runRoot, - DriverOptions: s.graphOptions, - UIDMaps: s.uidMap, - GIDMaps: s.gidMap, + Root: s.graphRoot, + RunRoot: s.runRoot, + DriverPriority: s.graphDriverPriority, + DriverOptions: s.graphOptions, + UIDMaps: s.uidMap, + GIDMaps: s.gidMap, } - driver, err := drivers.New(s.graphDriverName, config) - if err != nil { - return nil, err - } - s.graphDriver = driver - s.graphDriverName = driver.String() - return driver, nil + return drivers.New(s.graphDriverName, config) } func (s *store) GraphDriver() (drivers.Driver, error) { - s.graphLock.Lock() - defer s.graphLock.Unlock() - if s.graphLock.TouchedSince(s.lastLoaded) { - s.graphDriver = nil - s.layerStore = nil - s.lastLoaded = time.Now() + if err := s.startUsingGraphDriver(); err != nil { + return nil, err } - return s.getGraphDriver() + defer s.stopUsingGraphDriver() + return s.graphDriver, nil } -// LayerStore obtains and returns a handle to the writeable layer store object -// used by the Store. Accessing this store directly will bypass locking and -// synchronization, so it is not a part of the exported Store interface. -func (s *store) LayerStore() (LayerStore, error) { - s.graphLock.Lock() - defer s.graphLock.Unlock() - if s.graphLock.TouchedSince(s.lastLoaded) { - s.graphDriver = nil - s.layerStore = nil - s.lastLoaded = time.Now() - } - if s.layerStore != nil { - return s.layerStore, nil - } - driver, err := s.getGraphDriver() - if err != nil { - return nil, err +// getLayerStoreLocked obtains and returns a handle to the writeable layer store object +// used by the Store. +// It must be called with s.graphLock held. 
+func (s *store) getLayerStoreLocked() (rwLayerStore, error) { + if s.layerStoreUseGetters != nil { + return s.layerStoreUseGetters, nil } driverPrefix := s.graphDriverName + "-" rlpath := filepath.Join(s.runRoot, driverPrefix+"layers") @@ -868,76 +978,238 @@ func (s *store) LayerStore() (LayerStore, error) { if err := os.MkdirAll(glpath, 0700); err != nil { return nil, err } - rls, err := s.newLayerStore(rlpath, glpath, driver) + rls, err := s.newLayerStore(rlpath, glpath, s.graphDriver, s.transientStore) if err != nil { return nil, err } - s.layerStore = rls - return s.layerStore, nil + s.layerStoreUseGetters = rls + return s.layerStoreUseGetters, nil } -// ROLayerStores obtains additional read/only layer store objects used by the -// Store. Accessing these stores directly will bypass locking and -// synchronization, so it is not part of the exported Store interface. -func (s *store) ROLayerStores() ([]ROLayerStore, error) { - s.graphLock.Lock() - defer s.graphLock.Unlock() - if s.roLayerStores != nil { - return s.roLayerStores, nil - } - driver, err := s.getGraphDriver() - if err != nil { +// getLayerStore obtains and returns a handle to the writeable layer store object +// used by the store. +// It must be called WITHOUT s.graphLock held. +func (s *store) getLayerStore() (rwLayerStore, error) { + if err := s.startUsingGraphDriver(); err != nil { return nil, err } + defer s.stopUsingGraphDriver() + return s.getLayerStoreLocked() +} + +// getROLayerStoresLocked obtains additional read/only layer store objects used by the +// Store. +// It must be called with s.graphLock held. +func (s *store) getROLayerStoresLocked() ([]roLayerStore, error) { + if s.roLayerStoresUseGetters != nil { + return s.roLayerStoresUseGetters, nil + } driverPrefix := s.graphDriverName + "-" rlpath := filepath.Join(s.runRoot, driverPrefix+"layers") if err := os.MkdirAll(rlpath, 0700); err != nil { return nil, err } - for _, store := range driver.AdditionalImageStores() { + for _, store := range s.graphDriver.AdditionalImageStores() { glpath := filepath.Join(store, driverPrefix+"layers") - rls, err := newROLayerStore(rlpath, glpath, driver) + rls, err := newROLayerStore(rlpath, glpath, s.graphDriver) if err != nil { return nil, err } - s.roLayerStores = append(s.roLayerStores, rls) + s.roLayerStoresUseGetters = append(s.roLayerStoresUseGetters, rls) + } + return s.roLayerStoresUseGetters, nil +} + +// bothLayerStoreKindsLocked returns the primary, and additional read-only, layer store objects used by the store. +// It must be called with s.graphLock held. +func (s *store) bothLayerStoreKindsLocked() (rwLayerStore, []roLayerStore, error) { + primary, err := s.getLayerStoreLocked() + if err != nil { + return nil, nil, fmt.Errorf("loading primary layer store data: %w", err) + } + additional, err := s.getROLayerStoresLocked() + if err != nil { + return nil, nil, fmt.Errorf("loading additional layer stores: %w", err) + } + return primary, additional, nil +} + +// bothLayerStoreKinds returns the primary, and additional read-only, layer store objects used by the store. +// It must be called with s.graphLock held. +func (s *store) bothLayerStoreKinds() (rwLayerStore, []roLayerStore, error) { + if err := s.startUsingGraphDriver(); err != nil { + return nil, nil, err + } + defer s.stopUsingGraphDriver() + return s.bothLayerStoreKindsLocked() +} + +// allLayerStores returns a list of all layer store objects used by the Store. +// This is a convenience method for read-only users of the Store. 
+// It must be called with s.graphLock held. +func (s *store) allLayerStoresLocked() ([]roLayerStore, error) { + primary, additional, err := s.bothLayerStoreKindsLocked() + if err != nil { + return nil, err + } + return append([]roLayerStore{primary}, additional...), nil +} + +// allLayerStores returns a list of all layer store objects used by the Store. +// This is a convenience method for read-only users of the Store. +// It must be called WITHOUT s.graphLock held. +func (s *store) allLayerStores() ([]roLayerStore, error) { + if err := s.startUsingGraphDriver(); err != nil { + return nil, err + } + defer s.stopUsingGraphDriver() + return s.allLayerStoresLocked() +} + +// readAllLayerStores processes allLayerStores() in order: +// It locks the store for reading, checks for updates, and calls +// +// (done, err) := fn(store) +// +// until the callback returns done == true, and returns the data from the callback. +// +// If reading any layer store fails, it immediately returns (true, err). +// +// If all layer stores are processed without setting done == true, it returns (false, nil). +// +// Typical usage: +// +// var res T = failureValue +// if done, err := s.readAllLayerStores(store, func(…) { +// … +// }; done { +// return res, err +// } +func (s *store) readAllLayerStores(fn func(store roLayerStore) (bool, error)) (bool, error) { + layerStores, err := s.allLayerStores() + if err != nil { + return true, err + } + for _, s := range layerStores { + store := s + if err := store.startReading(); err != nil { + return true, err + } + defer store.stopReading() + if done, err := fn(store); done { + return true, err + } + } + return false, nil +} + +// writeToLayerStore is a helper for working with store.getLayerStore(): +// It locks the store for writing, checks for updates, and calls fn() +// It returns the return value of fn, or its own error initializing the store. +func (s *store) writeToLayerStore(fn func(store rwLayerStore) error) error { + store, err := s.getLayerStore() + if err != nil { + return err + } + + if err := store.startWriting(); err != nil { + return err } - return s.roLayerStores, nil + defer store.stopWriting() + return fn(store) } -// ImageStore obtains and returns a handle to the writable image store object -// used by the Store. Accessing this store directly will bypass locking and -// synchronization, so it is not a part of the exported Store interface. -func (s *store) ImageStore() (ImageStore, error) { - if s.imageStore != nil { - return s.imageStore, nil +// allImageStores returns a list of all image store objects used by the Store. +// This is a convenience method for read-only users of the Store. +func (s *store) allImageStores() []roImageStore { + return append([]roImageStore{s.imageStore}, s.roImageStores...) +} + +// readAllImageStores processes allImageStores() in order: +// It locks the store for reading, checks for updates, and calls +// +// (done, err) := fn(store) +// +// until the callback returns done == true, and returns the data from the callback. +// +// If reading any Image store fails, it immediately returns (true, err). +// +// If all Image stores are processed without setting done == true, it returns (false, nil). 
+
+// readAllImageStores processes allImageStores() in order:
+// It locks the store for reading, checks for updates, and calls
+//
+//	(done, err) := fn(store)
+//
+// until the callback returns done == true, and returns the data from the callback.
+//
+// If reading any image store fails, it immediately returns (true, err).
+//
+// If all image stores are processed without setting done == true, it returns (false, nil).
+//
+// Typical usage:
+//
+//	var res T = failureValue
+//	if done, err := s.readAllImageStores(func(…) {
+//		…
+//	}); done {
+//		return res, err
+//	}
+func (s *store) readAllImageStores(fn func(store roImageStore) (bool, error)) (bool, error) {
+	for _, s := range s.allImageStores() {
+		store := s
+		if err := store.startReading(); err != nil {
+			return true, err
+		}
+		defer store.stopReading()
+		if done, err := fn(store); done {
+			return true, err
+		}
 	}
-	return nil, ErrLoadError
+	return false, nil
 }
 
-// ROImageStores obtains additional read/only image store objects used by the
-// Store. Accessing these stores directly will bypass locking and
-// synchronization, so it is not a part of the exported Store interface.
-func (s *store) ROImageStores() ([]ROImageStore, error) {
-	if s.imageStore == nil {
-		return nil, ErrLoadError
+// writeToImageStore is a convenience helper for working with store.imageStore:
+// It locks the store for writing, checks for updates, and calls fn(), which can then access store.imageStore.
+// It returns the return value of fn, or its own error initializing the store.
+func (s *store) writeToImageStore(fn func() error) error {
+	if err := s.imageStore.startWriting(); err != nil {
+		return err
 	}
+	defer s.imageStore.stopWriting()
+	return fn()
+}
 
-	return s.roImageStores, nil
+// writeToContainerStore is a convenience helper for working with store.containerStore:
+// It locks the store for writing, checks for updates, and calls fn(), which can then access store.containerStore.
+// It returns the return value of fn, or its own error initializing the store.
+func (s *store) writeToContainerStore(fn func() error) error {
+	if err := s.containerStore.startWriting(); err != nil {
+		return err
+	}
+	defer s.containerStore.stopWriting()
+	return fn()
 }
 
-// ContainerStore obtains and returns a handle to the container store object
-// used by the Store. Accessing this store directly will bypass locking and
-// synchronization, so it is not a part of the exported Store interface.
-func (s *store) ContainerStore() (ContainerStore, error) {
-	if s.containerStore != nil {
-		return s.containerStore, nil
+// writeToAllStores is a convenience helper for writing to all three stores:
+// It locks the stores for writing, checks for updates, and calls fn(), which can then access the provided layer store,
+// s.imageStore and s.containerStore.
+// It returns the return value of fn, or its own error initializing the stores.
+func (s *store) writeToAllStores(fn func(rlstore rwLayerStore) error) error {
+	rlstore, err := s.getLayerStore()
+	if err != nil {
+		return err
+	}
+
+	if err := rlstore.startWriting(); err != nil {
+		return err
 	}
-	return nil, ErrLoadError
+	defer rlstore.stopWriting()
+	if err := s.imageStore.startWriting(); err != nil {
+		return err
+	}
+	defer s.imageStore.stopWriting()
+	if err := s.containerStore.startWriting(); err != nil {
+		return err
+	}
+	defer s.containerStore.stopWriting()
+
+	return fn(rlstore)
 }
 
-func (s *store) canUseShifting(uidmap, gidmap []idtools.IDMap) bool {
-	if !s.graphDriver.SupportsShifting() {
+// canUseShifting returns true if the driver of the given layer store supports ID shifting,
+// and the supplied uidmap and gidmap are each either unset or contiguous.
+// store must be locked for writing.
+func canUseShifting(store rwLayerStore, uidmap, gidmap []idtools.IDMap) bool { + if !store.supportsShifting() { return false } if uidmap != nil && !idtools.IsContiguous(uidmap) { @@ -951,31 +1223,18 @@ func (s *store) canUseShifting(uidmap, gidmap []idtools.IDMap) bool { func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions, diff io.Reader) (*Layer, int64, error) { var parentLayer *Layer - rlstore, err := s.LayerStore() - if err != nil { - return nil, -1, err - } - rlstores, err := s.ROLayerStores() + rlstore, rlstores, err := s.bothLayerStoreKinds() if err != nil { return nil, -1, err } - rcstore, err := s.ContainerStore() - if err != nil { - return nil, -1, err - } - rlstore.Lock() - defer rlstore.Unlock() - if err := rlstore.ReloadIfChanged(); err != nil { + if err := rlstore.startWriting(); err != nil { return nil, -1, err } - rcstore.Lock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { + defer rlstore.stopWriting() + if err := s.containerStore.startWriting(); err != nil { return nil, -1, err } - if id == "" { - id = stringid.GenerateRandomID() - } + defer s.containerStore.stopWriting() if options == nil { options = &LayerOptions{} } @@ -989,14 +1248,13 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w gidMap := options.GIDMap if parent != "" { var ilayer *Layer - for _, l := range append([]ROLayerStore{rlstore}, rlstores...) { + for _, l := range append([]roLayerStore{rlstore}, rlstores...) { lstore := l if lstore != rlstore { - lstore.RLock() - defer lstore.Unlock() - if err := lstore.ReloadIfChanged(); err != nil { + if err := lstore.startReading(); err != nil { return nil, -1, err } + defer lstore.stopReading() } if l, err := lstore.Get(parent); err == nil && l != nil { ilayer = l @@ -1008,7 +1266,7 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w return nil, -1, ErrLayerUnknown } parentLayer = ilayer - containers, err := rcstore.Containers() + containers, err := s.containerStore.Containers() if err != nil { return nil, -1, err } @@ -1031,20 +1289,21 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w gidMap = s.gidMap } } - var layerOptions *LayerOptions - if s.canUseShifting(uidMap, gidMap) { - layerOptions = &LayerOptions{IDMappingOptions: types.IDMappingOptions{HostUIDMapping: true, HostGIDMapping: true, UIDMap: nil, GIDMap: nil}} + layerOptions := LayerOptions{ + OriginalDigest: options.OriginalDigest, + UncompressedDigest: options.UncompressedDigest, + } + if canUseShifting(rlstore, uidMap, gidMap) { + layerOptions.IDMappingOptions = types.IDMappingOptions{HostUIDMapping: true, HostGIDMapping: true, UIDMap: nil, GIDMap: nil} } else { - layerOptions = &LayerOptions{ - IDMappingOptions: types.IDMappingOptions{ - HostUIDMapping: options.HostUIDMapping, - HostGIDMapping: options.HostGIDMapping, - UIDMap: copyIDMap(uidMap), - GIDMap: copyIDMap(gidMap), - }, + layerOptions.IDMappingOptions = types.IDMappingOptions{ + HostUIDMapping: options.HostUIDMapping, + HostGIDMapping: options.HostGIDMapping, + UIDMap: copyIDMap(uidMap), + GIDMap: copyIDMap(gidMap), } } - return rlstore.Put(id, parentLayer, names, mountLabel, nil, layerOptions, writeable, nil, diff) + return rlstore.Put(id, parentLayer, names, mountLabel, nil, &layerOptions, writeable, nil, diff) } func (s *store) CreateLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions) (*Layer, error) { 
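PutLayer above delegates its ID-mapping decision to canUseShifting, which permits mount-time shifting only when the requested maps are contiguous. The sketch below illustrates that contiguity rule; isContiguous is an approximation of idtools.IsContiguous, written inline so the example compiles on its own:

package main

import "fmt"

// idMap mirrors the shape of idtools.IDMap.
type idMap struct{ ContainerID, HostID, Size int }

// isContiguous approximates the check canUseShifting relies on: each range
// must begin exactly where the previous one ended, on both the container
// and the host side, so the whole mapping amounts to one linear shift.
func isContiguous(m []idMap) bool {
	for i := 1; i < len(m); i++ {
		prev := m[i-1]
		if m[i].ContainerID != prev.ContainerID+prev.Size ||
			m[i].HostID != prev.HostID+prev.Size {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(isContiguous([]idMap{{0, 100000, 65536}}))                      // true: a single range
	fmt.Println(isContiguous([]idMap{{0, 100000, 1000}, {1000, 101000, 1000}})) // true: back-to-back ranges
	fmt.Println(isContiguous([]idMap{{0, 100000, 1000}, {5000, 101000, 1000}})) // false: hole on the container side
}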
@@ -1053,32 +1312,18 @@ func (s *store) CreateLayer(id, parent string, names []string, mountLabel string
 }
 
 func (s *store) CreateImage(id string, names []string, layer, metadata string, options *ImageOptions) (*Image, error) {
-	if id == "" {
-		id = stringid.GenerateRandomID()
-	}
-
 	if layer != "" {
-		lstore, err := s.LayerStore()
-		if err != nil {
-			return nil, err
-		}
-		lstores, err := s.ROLayerStores()
+		layerStores, err := s.allLayerStores()
 		if err != nil {
 			return nil, err
 		}
 		var ilayer *Layer
-		for _, s := range append([]ROLayerStore{lstore}, lstores...) {
+		for _, s := range layerStores {
 			store := s
-			if store == lstore {
-				store.Lock()
-			} else {
-				store.RLock()
-			}
-			defer store.Unlock()
-			err := store.ReloadIfChanged()
-			if err != nil {
+			if err := store.startReading(); err != nil {
 				return nil, err
 			}
+			defer store.stopReading()
 			ilayer, err = store.Get(layer)
 			if err == nil {
 				break
@@ -1090,28 +1335,30 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, o
 		layer = ilayer.ID
 	}
 
-	ristore, err := s.ImageStore()
-	if err != nil {
-		return nil, err
-	}
-	ristore.Lock()
-	defer ristore.Unlock()
-	if err := ristore.ReloadIfChanged(); err != nil {
-		return nil, err
-	}
-
-	creationDate := time.Now().UTC()
-	if options != nil && !options.CreationDate.IsZero() {
-		creationDate = options.CreationDate
-	}
+	var res *Image
+	err := s.writeToImageStore(func() error {
+		creationDate := time.Now().UTC()
+		if options != nil && !options.CreationDate.IsZero() {
+			creationDate = options.CreationDate
+		}
 
-	return ristore.Create(id, names, layer, metadata, creationDate, options.Digest)
+		var err error
+		res, err = s.imageStore.Create(id, names, layer, metadata, creationDate, options.Digest)
+		return err
+	})
+	return res, err
 }
 
-func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, createMappedLayer bool, rlstore LayerStore, lstores []ROLayerStore, options types.IDMappingOptions) (*Layer, error) {
+// imageTopLayerForMapping locates the layer that can serve as image's top layer under the
+// requested ID mapping options, creating and registering an ID-mapped copy of the top
+// layer if no existing layer matches.
+// On entry:
+// - ristore must be locked EITHER for reading or writing
+// - s.imageStore must be locked for writing; it might be identical to ristore.
+// - rlstore must be locked for writing
+// - lstores must all be locked for reading
+func (s *store) imageTopLayerForMapping(image *Image, ristore roImageStore, rlstore rwLayerStore, lstores []roLayerStore, options types.IDMappingOptions) (*Layer, error) {
 	layerMatchesMappingOptions := func(layer *Layer, options types.IDMappingOptions) bool {
 		// If the driver supports shifting and the layer has no mappings, we can use it.
-		if s.canUseShifting(options.UIDMap, options.GIDMap) && len(layer.UIDMap) == 0 && len(layer.GIDMap) == 0 {
+		if canUseShifting(rlstore, options.UIDMap, options.GIDMap) && len(layer.UIDMap) == 0 && len(layer.GIDMap) == 0 {
 			return true
 		}
 		// If we want host mapping, and the layer uses mappings, it's not the best match.
@@ -1121,25 +1368,15 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, crea
 		if options.HostGIDMapping && len(layer.GIDMap) != 0 {
 			return false
 		}
-		// If we don't care about the mapping, it's fine.
-		if len(options.UIDMap) == 0 && len(options.GIDMap) == 0 {
-			return true
-		}
 		// Compare the maps.
 		return reflect.DeepEqual(layer.UIDMap, options.UIDMap) && reflect.DeepEqual(layer.GIDMap, options.GIDMap)
 	}
 	var layer, parentLayer *Layer
-	allStores := append([]ROLayerStore{rlstore}, lstores...)
+	allStores := append([]roLayerStore{rlstore}, lstores...)
 	// Locate the image's top layer and its parent, if it has one. 
+ createMappedLayer := ristore == s.imageStore for _, s := range allStores { store := s - if store != rlstore { - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return nil, err - } - } // Walk the top layer list. for _, candidate := range append([]string{image.TopLayer}, image.MappedTopLayers...) { if cLayer, err := store.Get(candidate); err == nil { @@ -1166,6 +1403,11 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, crea if layer == nil { layer = cLayer parentLayer = cParentLayer + if store != rlstore { + // The layer is in another store, so we cannot + // create a mapped version of it to the image. + createMappedLayer = false + } } } } @@ -1180,44 +1422,41 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, crea return layer, nil } // The top layer's mappings don't match the ones we want, and it's in an image store - // that lets us edit image metadata... - if istore, ok := ristore.(*imageStore); ok { - // ... so create a duplicate of the layer with the desired mappings, and - // register it as an alternate top layer in the image. - var layerOptions LayerOptions - if s.canUseShifting(options.UIDMap, options.GIDMap) { - layerOptions = LayerOptions{ - IDMappingOptions: types.IDMappingOptions{ - HostUIDMapping: true, - HostGIDMapping: true, - UIDMap: nil, - GIDMap: nil, - }, - } - } else { - layerOptions = LayerOptions{ - IDMappingOptions: types.IDMappingOptions{ - HostUIDMapping: options.HostUIDMapping, - HostGIDMapping: options.HostGIDMapping, - UIDMap: copyIDMap(options.UIDMap), - GIDMap: copyIDMap(options.GIDMap), - }, - } + // that lets us edit image metadata, so create a duplicate of the layer with the desired + // mappings, and register it as an alternate top layer in the image. + var layerOptions LayerOptions + if canUseShifting(rlstore, options.UIDMap, options.GIDMap) { + layerOptions = LayerOptions{ + IDMappingOptions: types.IDMappingOptions{ + HostUIDMapping: true, + HostGIDMapping: true, + UIDMap: nil, + GIDMap: nil, + }, } - layerOptions.TemplateLayer = layer.ID - mappedLayer, _, err := rlstore.Put("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil, nil) - if err != nil { - return nil, errors.Wrapf(err, "error creating an ID-mapped copy of layer %q", layer.ID) + } else { + layerOptions = LayerOptions{ + IDMappingOptions: types.IDMappingOptions{ + HostUIDMapping: options.HostUIDMapping, + HostGIDMapping: options.HostGIDMapping, + UIDMap: copyIDMap(options.UIDMap), + GIDMap: copyIDMap(options.GIDMap), + }, } - if err = istore.addMappedTopLayer(image.ID, mappedLayer.ID); err != nil { - if err2 := rlstore.Delete(mappedLayer.ID); err2 != nil { - err = errors.WithMessage(err, fmt.Sprintf("error deleting layer %q: %v", mappedLayer.ID, err2)) - } - return nil, errors.Wrapf(err, "error registering ID-mapped layer with image %q", image.ID) + } + layerOptions.TemplateLayer = layer.ID + mappedLayer, _, err := rlstore.Put("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil, nil) + if err != nil { + return nil, fmt.Errorf("creating an ID-mapped copy of layer %q: %w", layer.ID, err) + } + // By construction, createMappedLayer can only be true if ristore == s.imageStore. 
+ if err = s.imageStore.addMappedTopLayer(image.ID, mappedLayer.ID); err != nil { + if err2 := rlstore.Delete(mappedLayer.ID); err2 != nil { + err = fmt.Errorf("deleting layer %q: %v: %w", mappedLayer.ID, err2, err) } - layer = mappedLayer + return nil, fmt.Errorf("registering ID-mapped layer with image %q: %w", image.ID, err) } - return layer, nil + return mappedLayer, nil } func (s *store) CreateContainer(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) { @@ -1230,13 +1469,10 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat if options.HostGIDMapping { options.GIDMap = nil } - rlstore, err := s.LayerStore() + rlstore, lstores, err := s.bothLayerStoreKinds() // lstores will be locked read-only if image != "" if err != nil { return nil, err } - if id == "" { - id = stringid.GenerateRandomID() - } var imageTopLayer *Layer imageID := "" @@ -1250,56 +1486,52 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat defer s.usernsLock.Unlock() } - var imageHomeStore ROImageStore - var istore ImageStore - var istores []ROImageStore - var lstores []ROLayerStore - var cimage *Image + var imageHomeStore roImageStore // Set if image != "" + // s.imageStore is locked read-write, if image != "" + // s.roImageStores are NOT NECESSARILY ALL locked read-only if image != "" + var cimage *Image // Set if image != "" if image != "" { - var err error - lstores, err = s.ROLayerStores() - if err != nil { - return nil, err - } - istore, err = s.ImageStore() - if err != nil { - return nil, err - } - istores, err = s.ROImageStores() - if err != nil { - return nil, err - } - rlstore.Lock() - defer rlstore.Unlock() - if err := rlstore.ReloadIfChanged(); err != nil { + if err := rlstore.startWriting(); err != nil { return nil, err } - for _, s := range append([]ROImageStore{istore}, istores...) 
{ + defer rlstore.stopWriting() + for _, s := range lstores { store := s - if store == istore { - store.Lock() - } else { - store.RLock() - } - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { + if err := store.startReading(); err != nil { return nil, err } - cimage, err = store.Get(image) - if err == nil { - imageHomeStore = store - break + defer store.stopReading() + } + if err := s.imageStore.startWriting(); err != nil { + return nil, err + } + defer s.imageStore.stopWriting() + cimage, err = s.imageStore.Get(image) + if err == nil { + imageHomeStore = s.imageStore + } else { + for _, s := range s.roImageStores { + store := s + if err := store.startReading(); err != nil { + return nil, err + } + defer store.stopReading() + cimage, err = store.Get(image) + if err == nil { + imageHomeStore = store + break + } } } if cimage == nil { - return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + return nil, fmt.Errorf("locating image with ID %q: %w", image, ErrImageUnknown) } imageID = cimage.ID } if options.AutoUserNs { var err error - options.UIDMap, options.GIDMap, err = s.getAutoUserNS(id, &options.AutoUserNsOpts, cimage) + options.UIDMap, options.GIDMap, err = s.getAutoUserNS(&options.AutoUserNsOpts, cimage, rlstore, lstores) if err != nil { return nil, err } @@ -1311,8 +1543,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat idMappingsOptions := options.IDMappingOptions if image != "" { if cimage.TopLayer != "" { - createMappedLayer := imageHomeStore == istore - ilayer, err := s.imageTopLayerForMapping(cimage, imageHomeStore, createMappedLayer, rlstore, lstores, idMappingsOptions) + ilayer, err := s.imageTopLayerForMapping(cimage, imageHomeStore, rlstore, lstores, idMappingsOptions) if err != nil { return nil, err } @@ -1326,11 +1557,10 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat } } } else { - rlstore.Lock() - defer rlstore.Unlock() - if err := rlstore.ReloadIfChanged(); err != nil { + if err := rlstore.startWriting(); err != nil { return nil, err } + defer rlstore.stopWriting() if !options.HostUIDMapping && len(options.UIDMap) == 0 { uidMap = s.uidMap } @@ -1338,34 +1568,36 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat gidMap = s.gidMap } } - var layerOptions *LayerOptions - if s.canUseShifting(uidMap, gidMap) { - layerOptions = &LayerOptions{ - IDMappingOptions: types.IDMappingOptions{ + layerOptions := &LayerOptions{ + // Normally layers for containers are volatile only if the container is. + // But in transient store mode, all container layers are volatile. 
+ Volatile: options.Volatile || s.transientStore, + } + if canUseShifting(rlstore, uidMap, gidMap) { + layerOptions.IDMappingOptions = + types.IDMappingOptions{ HostUIDMapping: true, HostGIDMapping: true, UIDMap: nil, GIDMap: nil, - }, - } + } } else { - layerOptions = &LayerOptions{ - IDMappingOptions: types.IDMappingOptions{ + layerOptions.IDMappingOptions = + types.IDMappingOptions{ HostUIDMapping: idMappingsOptions.HostUIDMapping, HostGIDMapping: idMappingsOptions.HostGIDMapping, UIDMap: copyIDMap(uidMap), GIDMap: copyIDMap(gidMap), - }, - } + } + } if options.Flags == nil { options.Flags = make(map[string]interface{}) } - plabel, _ := options.Flags["ProcessLabel"].(string) - mlabel, _ := options.Flags["MountLabel"].(string) - if (plabel == "" && mlabel != "") || - (plabel != "" && mlabel == "") { - return nil, errors.Errorf("ProcessLabel and Mountlabel must either not be specified or both specified") + plabel, _ := options.Flags[processLabelFlag].(string) + mlabel, _ := options.Flags[mountLabelFlag].(string) + if (plabel == "" && mlabel != "") || (plabel != "" && mlabel == "") { + return nil, errors.New("ProcessLabel and Mountlabel must either not be specified or both specified") } if plabel == "" { @@ -1373,381 +1605,254 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat if err != nil { return nil, err } - options.Flags["ProcessLabel"] = processLabel - options.Flags["MountLabel"] = mountLabel + mlabel = mountLabel + options.Flags[processLabelFlag] = processLabel + options.Flags[mountLabelFlag] = mountLabel } - clayer, err := rlstore.Create(layer, imageTopLayer, nil, options.Flags["MountLabel"].(string), nil, layerOptions, true) + clayer, err := rlstore.Create(layer, imageTopLayer, nil, mlabel, options.StorageOpt, layerOptions, true) if err != nil { return nil, err } layer = clayer.ID - rcstore, err := s.ContainerStore() - if err != nil { - return nil, err - } - rcstore.Lock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { - return nil, err - } - options.IDMappingOptions = types.IDMappingOptions{ - HostUIDMapping: len(options.UIDMap) == 0, - HostGIDMapping: len(options.GIDMap) == 0, - UIDMap: copyIDMap(options.UIDMap), - GIDMap: copyIDMap(options.GIDMap), - } - container, err := rcstore.Create(id, names, imageID, layer, metadata, options) - if err != nil || container == nil { - rlstore.Delete(layer) + + // Normally only `--rm` containers are volatile, but in transient store mode all containers are volatile + if s.transientStore { + options.Volatile = true } + + var container *Container + err = s.writeToContainerStore(func() error { + options.IDMappingOptions = types.IDMappingOptions{ + HostUIDMapping: len(options.UIDMap) == 0, + HostGIDMapping: len(options.GIDMap) == 0, + UIDMap: copyIDMap(options.UIDMap), + GIDMap: copyIDMap(options.GIDMap), + } + var err error + container, err = s.containerStore.Create(id, names, imageID, layer, metadata, options) + if err != nil || container == nil { + if err2 := rlstore.Delete(layer); err2 != nil { + if err == nil { + err = fmt.Errorf("deleting layer %#v: %w", layer, err2) + } else { + logrus.Errorf("While recovering from a failure to create a container, error deleting layer %#v: %v", layer, err2) + } + } + } + return err + }) return container, err } func (s *store) SetMetadata(id, metadata string) error { - rlstore, err := s.LayerStore() - if err != nil { - return err - } - ristore, err := s.ImageStore() - if err != nil { - return err - } - rcstore, err := s.ContainerStore() - if err 
!= nil { - return err - } - - rlstore.Lock() - defer rlstore.Unlock() - if err := rlstore.ReloadIfChanged(); err != nil { - return err - } - ristore.Lock() - defer ristore.Unlock() - if err := ristore.ReloadIfChanged(); err != nil { - return err - } - rcstore.Lock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { - return err - } - - if rlstore.Exists(id) { - return rlstore.SetMetadata(id, metadata) - } - if ristore.Exists(id) { - return ristore.SetMetadata(id, metadata) - } - if rcstore.Exists(id) { - return rcstore.SetMetadata(id, metadata) - } - return ErrNotAnID + return s.writeToAllStores(func(rlstore rwLayerStore) error { + if rlstore.Exists(id) { + return rlstore.SetMetadata(id, metadata) + } + if s.imageStore.Exists(id) { + return s.imageStore.SetMetadata(id, metadata) + } + if s.containerStore.Exists(id) { + return s.containerStore.SetMetadata(id, metadata) + } + return ErrNotAnID + }) } func (s *store) Metadata(id string) (string, error) { - lstore, err := s.LayerStore() - if err != nil { - return "", err - } - lstores, err := s.ROLayerStores() - if err != nil { - return "", err - } - for _, s := range append([]ROLayerStore{lstore}, lstores...) { - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return "", err - } + var res string + + if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { if store.Exists(id) { - return store.Metadata(id) + var err error + res, err = store.Metadata(id) + return true, err } + return false, nil + }); done { + return res, err } - istore, err := s.ImageStore() - if err != nil { - return "", err - } - istores, err := s.ROImageStores() - if err != nil { - return "", err - } - for _, s := range append([]ROImageStore{istore}, istores...) { - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return "", err - } + if done, err := s.readAllImageStores(func(store roImageStore) (bool, error) { if store.Exists(id) { - return store.Metadata(id) + var err error + res, err = store.Metadata(id) + return true, err } + return false, nil + }); done { + return res, err } - cstore, err := s.ContainerStore() - if err != nil { - return "", err - } - cstore.RLock() - defer cstore.Unlock() - if err := cstore.ReloadIfChanged(); err != nil { + if err := s.containerStore.startReading(); err != nil { return "", err } - if cstore.Exists(id) { - return cstore.Metadata(id) + defer s.containerStore.stopReading() + if s.containerStore.Exists(id) { + return s.containerStore.Metadata(id) } return "", ErrNotAnID } func (s *store) ListImageBigData(id string) ([]string, error) { - istore, err := s.ImageStore() - if err != nil { - return nil, err - } - istores, err := s.ROImageStores() - if err != nil { - return nil, err - } - for _, s := range append([]ROImageStore{istore}, istores...) 
{ - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return nil, err - } + var res []string + if done, err := s.readAllImageStores(func(store roImageStore) (bool, error) { bigDataNames, err := store.BigDataNames(id) if err == nil { - return bigDataNames, err + res = bigDataNames + return true, nil } + return false, nil + }); done { + return res, err } - return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } func (s *store) ImageBigDataSize(id, key string) (int64, error) { - istore, err := s.ImageStore() - if err != nil { - return -1, err - } - istores, err := s.ROImageStores() - if err != nil { - return -1, err - } - for _, s := range append([]ROImageStore{istore}, istores...) { - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return -1, err - } + var res int64 = -1 + if done, err := s.readAllImageStores(func(store roImageStore) (bool, error) { size, err := store.BigDataSize(id, key) if err == nil { - return size, nil + res = size + return true, nil } + return false, nil + }); done { + return res, err } return -1, ErrSizeUnknown } func (s *store) ImageBigDataDigest(id, key string) (digest.Digest, error) { - ristore, err := s.ImageStore() - if err != nil { - return "", err - } - stores, err := s.ROImageStores() - if err != nil { - return "", err - } - stores = append([]ROImageStore{ristore}, stores...) - for _, r := range stores { - ristore := r - ristore.RLock() - defer ristore.Unlock() - if err := ristore.ReloadIfChanged(); err != nil { - return "", err - } + var res digest.Digest + if done, err := s.readAllImageStores(func(ristore roImageStore) (bool, error) { d, err := ristore.BigDataDigest(id, key) if err == nil && d.Validate() == nil { - return d, nil + res = d + return true, nil } + return false, nil + }); done { + return res, err } return "", ErrDigestUnknown } func (s *store) ImageBigData(id, key string) ([]byte, error) { - istore, err := s.ImageStore() - if err != nil { - return nil, err - } - istores, err := s.ROImageStores() - if err != nil { - return nil, err - } foundImage := false - for _, s := range append([]ROImageStore{istore}, istores...) { - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return nil, err - } + var res []byte + if done, err := s.readAllImageStores(func(store roImageStore) (bool, error) { data, err := store.BigData(id, key) if err == nil { - return data, nil + res = data + return true, nil } if store.Exists(id) { foundImage = true } + return false, nil + }); done { + return res, err } if foundImage { - return nil, errors.Wrapf(os.ErrNotExist, "error locating item named %q for image with ID %q", key, id) + return nil, fmt.Errorf("locating item named %q for image with ID %q (consider removing the image to resolve the issue): %w", key, id, os.ErrNotExist) } - return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } // ListLayerBigData retrieves a list of the (possibly large) chunks of // named data associated with an layer. 
func (s *store) ListLayerBigData(id string) ([]string, error) { - lstore, err := s.LayerStore() - if err != nil { - return nil, err - } - lstores, err := s.ROLayerStores() - if err != nil { - return nil, err - } foundLayer := false - for _, s := range append([]ROLayerStore{lstore}, lstores...) { - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return nil, err - } + var res []string + if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { data, err := store.BigDataNames(id) if err == nil { - return data, nil + res = data + return true, nil } if store.Exists(id) { foundLayer = true } + return false, nil + }); done { + return res, err } if foundLayer { - return nil, errors.Wrapf(os.ErrNotExist, "error locating big data for layer with ID %q", id) + return nil, fmt.Errorf("locating big data for layer with ID %q: %w", id, os.ErrNotExist) } - return nil, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", id) + return nil, fmt.Errorf("locating layer with ID %q: %w", id, ErrLayerUnknown) } // LayerBigData retrieves a (possibly large) chunk of named data // associated with a layer. func (s *store) LayerBigData(id, key string) (io.ReadCloser, error) { - lstore, err := s.LayerStore() - if err != nil { - return nil, err - } - lstores, err := s.ROLayerStores() - if err != nil { - return nil, err - } foundLayer := false - for _, s := range append([]ROLayerStore{lstore}, lstores...) { - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return nil, err - } + var res io.ReadCloser + if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { data, err := store.BigData(id, key) if err == nil { - return data, nil + res = data + return true, nil } if store.Exists(id) { foundLayer = true } + return false, nil + }); done { + return res, err } if foundLayer { - return nil, errors.Wrapf(os.ErrNotExist, "error locating item named %q for layer with ID %q", key, id) + return nil, fmt.Errorf("locating item named %q for layer with ID %q: %w", key, id, os.ErrNotExist) } - return nil, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", id) + return nil, fmt.Errorf("locating layer with ID %q: %w", id, ErrLayerUnknown) } // SetLayerBigData stores a (possibly large) chunk of named data // associated with a layer. 
func (s *store) SetLayerBigData(id, key string, data io.Reader) error { - store, err := s.LayerStore() - if err != nil { - return err - } - - store.Lock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return err - } - return store.SetBigData(id, key, data) + return s.writeToLayerStore(func(store rwLayerStore) error { + return store.SetBigData(id, key, data) + }) } func (s *store) SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error { - ristore, err := s.ImageStore() - if err != nil { - return err - } - - ristore.Lock() - defer ristore.Unlock() - if err := ristore.ReloadIfChanged(); err != nil { - return err - } - - return ristore.SetBigData(id, key, data, digestManifest) + return s.writeToImageStore(func() error { + return s.imageStore.SetBigData(id, key, data, digestManifest) + }) } func (s *store) ImageSize(id string) (int64, error) { - var image *Image - - lstore, err := s.LayerStore() + layerStores, err := s.allLayerStores() if err != nil { - return -1, errors.Wrapf(err, "error loading primary layer store data") - } - lstores, err := s.ROLayerStores() - if err != nil { - return -1, errors.Wrapf(err, "error loading additional layer stores") + return -1, err } - for _, s := range append([]ROLayerStore{lstore}, lstores...) { + for _, s := range layerStores { store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { + if err := store.startReading(); err != nil { return -1, err } - } - - var imageStore ROBigDataStore - istore, err := s.ImageStore() - if err != nil { - return -1, errors.Wrapf(err, "error loading primary image store data") - } - istores, err := s.ROImageStores() - if err != nil { - return -1, errors.Wrapf(err, "error loading additional image stores") + defer store.stopReading() } // Look for the image's record. - for _, s := range append([]ROImageStore{istore}, istores...) { + var imageStore roBigDataStore + var image *Image + for _, s := range s.allImageStores() { store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { + if err := store.startReading(); err != nil { return -1, err } + defer store.stopReading() if image, err = store.Get(id); err == nil { imageStore = store break } } if image == nil { - return -1, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + return -1, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } // Start with a list of the image's top layers, if it has any. @@ -1768,16 +1873,16 @@ func (s *store) ImageSize(id string) (int64, error) { } visited[layerID] = struct{}{} // Look for the layer and the store that knows about it. - var layerStore ROLayerStore + var layerStore roLayerStore var layer *Layer - for _, store := range append([]ROLayerStore{lstore}, lstores...) { + for _, store := range layerStores { if layer, err = store.Get(layerID); err == nil { layerStore = store break } } if layer == nil { - return -1, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", layerID) + return -1, fmt.Errorf("locating layer with ID %q: %w", layerID, ErrLayerUnknown) } // The UncompressedSize is only valid if there's a digest to go with it. n := layer.UncompressedSize @@ -1785,7 +1890,7 @@ func (s *store) ImageSize(id string) (int64, error) { // Compute the size. 
n, err = layerStore.DiffSize("", layer.ID) if err != nil { - return -1, errors.Wrapf(err, "size/digest of layer with ID %q could not be calculated", layerID) + return -1, fmt.Errorf("size/digest of layer with ID %q could not be calculated: %w", layerID, err) } } // Count this layer. @@ -1800,12 +1905,12 @@ func (s *store) ImageSize(id string) (int64, error) { // Count big data items. names, err := imageStore.BigDataNames(id) if err != nil { - return -1, errors.Wrapf(err, "error reading list of big data items for image %q", id) + return -1, fmt.Errorf("reading list of big data items for image %q: %w", id, err) } for _, name := range names { n, err := imageStore.BigDataSize(id, name) if err != nil { - return -1, errors.Wrapf(err, "error reading size of big data item %q for image %q", name, id) + return -1, fmt.Errorf("reading size of big data item %q for image %q: %w", name, id, err) } size += n } @@ -1814,21 +1919,16 @@ func (s *store) ImageSize(id string) (int64, error) { } func (s *store) ContainerSize(id string) (int64, error) { - lstore, err := s.LayerStore() - if err != nil { - return -1, err - } - lstores, err := s.ROLayerStores() + layerStores, err := s.allLayerStores() if err != nil { return -1, err } - for _, s := range append([]ROLayerStore{lstore}, lstores...) { + for _, s := range layerStores { store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { + if err := store.startReading(); err != nil { return -1, err } + defer store.stopReading() } // Get the location of the container directory and container run directory. @@ -1842,188 +1942,132 @@ func (s *store) ContainerSize(id string) (int64, error) { return -1, err } - rcstore, err := s.ContainerStore() - if err != nil { - return -1, err - } - rcstore.RLock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { - return -1, err - } + var res int64 = -1 + err = s.writeToContainerStore(func() error { // Yes, s.containerStore.BigDataSize requires a write lock. + // Read the container record. + container, err := s.containerStore.Get(id) + if err != nil { + return err + } - // Read the container record. - container, err := rcstore.Get(id) - if err != nil { - return -1, err - } + // Read the container's layer's size. + var layer *Layer + var size int64 + for _, store := range layerStores { + if layer, err = store.Get(container.LayerID); err == nil { + size, err = store.DiffSize("", layer.ID) + if err != nil { + return fmt.Errorf("determining size of layer with ID %q: %w", layer.ID, err) + } + break + } + } + if layer == nil { + return fmt.Errorf("locating layer with ID %q: %w", container.LayerID, ErrLayerUnknown) + } - // Read the container's layer's size. - var layer *Layer - var size int64 - for _, store := range append([]ROLayerStore{lstore}, lstores...) { - if layer, err = store.Get(container.LayerID); err == nil { - size, err = store.DiffSize("", layer.ID) + // Count big data items. 
+ names, err := s.containerStore.BigDataNames(id) + if err != nil { + return fmt.Errorf("reading list of big data items for container %q: %w", container.ID, err) + } + for _, name := range names { + n, err := s.containerStore.BigDataSize(id, name) if err != nil { - return -1, errors.Wrapf(err, "error determining size of layer with ID %q", layer.ID) + return fmt.Errorf("reading size of big data item %q for container %q: %w", name, id, err) } - break + size += n } - } - if layer == nil { - return -1, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", container.LayerID) - } - // Count big data items. - names, err := rcstore.BigDataNames(id) - if err != nil { - return -1, errors.Wrapf(err, "error reading list of big data items for container %q", container.ID) - } - for _, name := range names { - n, err := rcstore.BigDataSize(id, name) + // Count the size of our container directory and container run directory. + n, err := directory.Size(cdir) if err != nil { - return -1, errors.Wrapf(err, "error reading size of big data item %q for container %q", name, id) + return err + } + size += n + n, err = directory.Size(rdir) + if err != nil { + return err } size += n - } - - // Count the size of our container directory and container run directory. - n, err := directory.Size(cdir) - if err != nil { - return -1, err - } - size += n - n, err = directory.Size(rdir) - if err != nil { - return -1, err - } - size += n - return size, nil + res = size + return nil + }) + return res, err } func (s *store) ListContainerBigData(id string) ([]string, error) { - rcstore, err := s.ContainerStore() - if err != nil { - return nil, err - } - - rcstore.RLock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { + if err := s.containerStore.startReading(); err != nil { return nil, err } + defer s.containerStore.stopReading() - return rcstore.BigDataNames(id) + return s.containerStore.BigDataNames(id) } func (s *store) ContainerBigDataSize(id, key string) (int64, error) { - rcstore, err := s.ContainerStore() - if err != nil { - return -1, err - } - rcstore.RLock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { - return -1, err - } - return rcstore.BigDataSize(id, key) + var res int64 = -1 + err := s.writeToContainerStore(func() error { // Yes, BigDataSize requires a write lock. + var err error + res, err = s.containerStore.BigDataSize(id, key) + return err + }) + return res, err } func (s *store) ContainerBigDataDigest(id, key string) (digest.Digest, error) { - rcstore, err := s.ContainerStore() - if err != nil { - return "", err - } - rcstore.RLock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { - return "", err - } - return rcstore.BigDataDigest(id, key) + var res digest.Digest + err := s.writeToContainerStore(func() error { // Yes, BigDataDigest requires a write lock. 
+ var err error + res, err = s.containerStore.BigDataDigest(id, key) + return err + }) + return res, err } func (s *store) ContainerBigData(id, key string) ([]byte, error) { - rcstore, err := s.ContainerStore() - if err != nil { - return nil, err - } - rcstore.RLock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { + if err := s.containerStore.startReading(); err != nil { return nil, err } - return rcstore.BigData(id, key) + defer s.containerStore.stopReading() + return s.containerStore.BigData(id, key) } func (s *store) SetContainerBigData(id, key string, data []byte) error { - rcstore, err := s.ContainerStore() - if err != nil { - return err - } - rcstore.Lock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { - return err - } - return rcstore.SetBigData(id, key, data) + return s.writeToContainerStore(func() error { + return s.containerStore.SetBigData(id, key, data) + }) } func (s *store) Exists(id string) bool { - lstore, err := s.LayerStore() - if err != nil { - return false - } - lstores, err := s.ROLayerStores() - if err != nil { - return false - } - for _, s := range append([]ROLayerStore{lstore}, lstores...) { - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return false - } + var res = false + + if done, _ := s.readAllLayerStores(func(store roLayerStore) (bool, error) { if store.Exists(id) { - return true + res = true + return true, nil } + return false, nil + }); done { + return res } - istore, err := s.ImageStore() - if err != nil { - return false - } - istores, err := s.ROImageStores() - if err != nil { - return false - } - for _, s := range append([]ROImageStore{istore}, istores...) { - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return false - } + if done, _ := s.readAllImageStores(func(store roImageStore) (bool, error) { if store.Exists(id) { - return true + res = true + return true, nil } + return false, nil + }); done { + return res } - rcstore, err := s.ContainerStore() - if err != nil { + if err := s.containerStore.startReading(); err != nil { return false } - rcstore.RLock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { - return false - } - if rcstore.Exists(id) { - return true - } - - return false + defer s.containerStore.stopReading() + return s.containerStore.Exists(id) } func dedupeNames(names []string) []string { @@ -2038,182 +2082,133 @@ func dedupeNames(names []string) []string { return deduped } +// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. 
func (s *store) SetNames(id string, names []string) error { + return s.updateNames(id, names, setNames) +} + +func (s *store) AddNames(id string, names []string) error { + return s.updateNames(id, names, addNames) +} + +func (s *store) RemoveNames(id string, names []string) error { + return s.updateNames(id, names, removeNames) +} + +func (s *store) updateNames(id string, names []string, op updateNameOperation) error { deduped := dedupeNames(names) - rlstore, err := s.LayerStore() - if err != nil { - return err - } - rlstore.Lock() - defer rlstore.Unlock() - if err := rlstore.ReloadIfChanged(); err != nil { + layerFound := false + if err := s.writeToLayerStore(func(rlstore rwLayerStore) error { + if !rlstore.Exists(id) { + return nil + } + layerFound = true + return rlstore.updateNames(id, deduped, op) + }); err != nil || layerFound { return err } - if rlstore.Exists(id) { - return rlstore.SetNames(id, deduped) - } - ristore, err := s.ImageStore() - if err != nil { - return err - } - ristore.Lock() - defer ristore.Unlock() - if err := ristore.ReloadIfChanged(); err != nil { + if err := s.imageStore.startWriting(); err != nil { return err } - if ristore.Exists(id) { - return ristore.SetNames(id, deduped) + defer s.imageStore.stopWriting() + if s.imageStore.Exists(id) { + return s.imageStore.updateNames(id, deduped, op) } // Check is id refers to a RO Store - ristores, err := s.ROImageStores() - if err != nil { - return err - } - for _, s := range ristores { - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { + for _, is := range s.roImageStores { + store := is + if err := store.startReading(); err != nil { return err } + defer store.stopReading() if i, err := store.Get(id); err == nil { if len(deduped) > 1 { // Do not want to create image name in R/W storage deduped = deduped[1:] } - _, err := ristore.Create(id, deduped, i.TopLayer, i.Metadata, i.Created, i.Digest) - if err == nil { - return ristore.Save() - } + _, err := s.imageStore.Create(id, deduped, i.TopLayer, i.Metadata, i.Created, i.Digest) return err } } - rcstore, err := s.ContainerStore() - if err != nil { - return err - } - rcstore.Lock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { + containerFound := false + if err := s.writeToContainerStore(func() error { + if !s.containerStore.Exists(id) { + return nil + } + containerFound = true + return s.containerStore.updateNames(id, deduped, op) + }); err != nil || containerFound { return err } - if rcstore.Exists(id) { - return rcstore.SetNames(id, deduped) - } + return ErrLayerUnknown } func (s *store) Names(id string) ([]string, error) { - lstore, err := s.LayerStore() - if err != nil { - return nil, err - } - lstores, err := s.ROLayerStores() - if err != nil { - return nil, err - } - for _, s := range append([]ROLayerStore{lstore}, lstores...) { - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return nil, err - } + var res []string + + if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { if l, err := store.Get(id); l != nil && err == nil { - return l.Names, nil + res = l.Names + return true, nil } + return false, nil + }); done { + return res, err } - istore, err := s.ImageStore() - if err != nil { - return nil, err - } - istores, err := s.ROImageStores() - if err != nil { - return nil, err - } - for _, s := range append([]ROImageStore{istore}, istores...) 
{ - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return nil, err - } + if done, err := s.readAllImageStores(func(store roImageStore) (bool, error) { if i, err := store.Get(id); i != nil && err == nil { - return i.Names, nil + res = i.Names + return true, nil } + return false, nil + }); done { + return res, err } - rcstore, err := s.ContainerStore() - if err != nil { + if err := s.containerStore.startReading(); err != nil { return nil, err } - rcstore.RLock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { - return nil, err - } - if c, err := rcstore.Get(id); c != nil && err == nil { + defer s.containerStore.stopReading() + if c, err := s.containerStore.Get(id); c != nil && err == nil { return c.Names, nil } return nil, ErrLayerUnknown } func (s *store) Lookup(name string) (string, error) { - lstore, err := s.LayerStore() - if err != nil { - return "", err - } - lstores, err := s.ROLayerStores() - if err != nil { - return "", err - } - for _, s := range append([]ROLayerStore{lstore}, lstores...) { - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return "", err - } + var res string + + if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { if l, err := store.Get(name); l != nil && err == nil { - return l.ID, nil + res = l.ID + return true, nil } + return false, nil + }); done { + return res, err } - istore, err := s.ImageStore() - if err != nil { - return "", err - } - istores, err := s.ROImageStores() - if err != nil { - return "", err - } - for _, s := range append([]ROImageStore{istore}, istores...) { - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return "", err - } + if done, err := s.readAllImageStores(func(store roImageStore) (bool, error) { if i, err := store.Get(name); i != nil && err == nil { - return i.ID, nil + res = i.ID + return true, nil } + return false, nil + }); done { + return res, err } - cstore, err := s.ContainerStore() - if err != nil { - return "", err - } - cstore.RLock() - defer cstore.Unlock() - if err := cstore.ReloadIfChanged(); err != nil { + if err := s.containerStore.startReading(); err != nil { return "", err } - if c, err := cstore.Get(name); c != nil && err == nil { + defer s.containerStore.stopReading() + if c, err := s.containerStore.Get(name); c != nil && err == nil { return c.ID, nil } @@ -2221,413 +2216,280 @@ func (s *store) Lookup(name string) (string, error) { } func (s *store) DeleteLayer(id string) error { - rlstore, err := s.LayerStore() - if err != nil { - return err - } - ristore, err := s.ImageStore() - if err != nil { - return err - } - rcstore, err := s.ContainerStore() - if err != nil { - return err - } - - rlstore.Lock() - defer rlstore.Unlock() - if err := rlstore.ReloadIfChanged(); err != nil { - return err - } - ristore.Lock() - defer ristore.Unlock() - if err := ristore.ReloadIfChanged(); err != nil { - return err - } - rcstore.Lock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { - return err - } - - if rlstore.Exists(id) { - if l, err := rlstore.Get(id); err != nil { - id = l.ID - } - layers, err := rlstore.Layers() - if err != nil { - return err - } - for _, layer := range layers { - if layer.Parent == id { - return errors.Wrapf(ErrLayerHasChildren, "used by layer %v", layer.ID) + return s.writeToAllStores(func(rlstore rwLayerStore) error { + if rlstore.Exists(id) { + if l, err := 
rlstore.Get(id); err == nil {
+				id = l.ID
+			}
+			layers, err := rlstore.Layers()
+			if err != nil {
+				return err
+			}
+			for _, layer := range layers {
+				if layer.Parent == id {
+					return fmt.Errorf("used by layer %v: %w", layer.ID, ErrLayerHasChildren)
+				}
+			}
+			images, err := s.imageStore.Images()
+			if err != nil {
+				return err
 			}
-		}
-		images, err := ristore.Images()
-		if err != nil {
-			return err
-		}
-		for _, image := range images {
-			if image.TopLayer == id {
-				return errors.Wrapf(ErrLayerUsedByImage, "layer %v used by image %v", id, image.ID)
+			for _, image := range images {
+				if image.TopLayer == id {
+					return fmt.Errorf("layer %v used by image %v: %w", id, image.ID, ErrLayerUsedByImage)
+				}
+			}
+			containers, err := s.containerStore.Containers()
+			if err != nil {
+				return err
 			}
-			if stringutils.InSlice(image.MappedTopLayers, id) {
-				// No write access to the image store, fail before the layer is deleted
-				if _, ok := ristore.(*imageStore); !ok {
-					return errors.Wrapf(ErrLayerUsedByImage, "layer %v used by image %v", id, image.ID)
+			for _, container := range containers {
+				if container.LayerID == id {
+					return fmt.Errorf("layer %v used by container %v: %w", id, container.ID, ErrLayerUsedByContainer)
 				}
 			}
-		}
-		containers, err := rcstore.Containers()
-		if err != nil {
-			return err
-		}
-		for _, container := range containers {
-			if container.LayerID == id {
-				return errors.Wrapf(ErrLayerUsedByContainer, "layer %v used by container %v", id, container.ID)
+			if err := rlstore.Delete(id); err != nil {
+				return fmt.Errorf("delete layer %v: %w", id, err)
 			}
-		}
-		if err := rlstore.Delete(id); err != nil {
-			return errors.Wrapf(err, "delete layer %v", id)
-		}
-		// The check here is used to avoid iterating the images if we don't need to.
-		// There is already a check above for the imageStore to be writeable when the layer is part of MappedTopLayers. 
- if istore, ok := ristore.(*imageStore); ok { for _, image := range images { if stringutils.InSlice(image.MappedTopLayers, id) { - if err = istore.removeMappedTopLayer(image.ID, id); err != nil { - return errors.Wrapf(err, "remove mapped top layer %v from image %v", id, image.ID) + if err = s.imageStore.removeMappedTopLayer(image.ID, id); err != nil { + return fmt.Errorf("remove mapped top layer %v from image %v: %w", id, image.ID, err) } } } + return nil } - return nil - } - return ErrNotALayer + return ErrNotALayer + }) } func (s *store) DeleteImage(id string, commit bool) (layers []string, err error) { - rlstore, err := s.LayerStore() - if err != nil { - return nil, err - } - ristore, err := s.ImageStore() - if err != nil { - return nil, err - } - rcstore, err := s.ContainerStore() - if err != nil { - return nil, err - } - - rlstore.Lock() - defer rlstore.Unlock() - if err := rlstore.ReloadIfChanged(); err != nil { - return nil, err - } - ristore.Lock() - defer ristore.Unlock() - if err := ristore.ReloadIfChanged(); err != nil { - return nil, err - } - rcstore.Lock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { - return nil, err - } layersToRemove := []string{} - if ristore.Exists(id) { - image, err := ristore.Get(id) - if err != nil { - return nil, err - } - id = image.ID - containers, err := rcstore.Containers() - if err != nil { - return nil, err - } - aContainerByImage := make(map[string]string) - for _, container := range containers { - aContainerByImage[container.ImageID] = container.ID - } - if container, ok := aContainerByImage[id]; ok { - return nil, errors.Wrapf(ErrImageUsedByContainer, "Image used by %v", container) - } - images, err := ristore.Images() - if err != nil { - return nil, err - } - layers, err := rlstore.Layers() - if err != nil { - return nil, err - } - childrenByParent := make(map[string]*[]string) - for _, layer := range layers { - parent := layer.Parent - if list, ok := childrenByParent[parent]; ok { - newList := append(*list, layer.ID) - childrenByParent[parent] = &newList - } else { - childrenByParent[parent] = &([]string{layer.ID}) + if err := s.writeToAllStores(func(rlstore rwLayerStore) error { + if s.imageStore.Exists(id) { + image, err := s.imageStore.Get(id) + if err != nil { + return err } - } - otherImagesByTopLayer := make(map[string]string) - for _, img := range images { - if img.ID != id { - otherImagesByTopLayer[img.TopLayer] = img.ID - for _, layerID := range img.MappedTopLayers { - otherImagesByTopLayer[layerID] = img.ID - } + id = image.ID + containers, err := s.containerStore.Containers() + if err != nil { + return err } - } - if commit { - if err = ristore.Delete(id); err != nil { - return nil, err + aContainerByImage := make(map[string]string) + for _, container := range containers { + aContainerByImage[container.ImageID] = container.ID } - } - layer := image.TopLayer - lastRemoved := "" - for layer != "" { - if rcstore.Exists(layer) { - break + if container, ok := aContainerByImage[id]; ok { + return fmt.Errorf("image used by %v: %w", container, ErrImageUsedByContainer) } - if _, ok := otherImagesByTopLayer[layer]; ok { - break + images, err := s.imageStore.Images() + if err != nil { + return err + } + layers, err := rlstore.Layers() + if err != nil { + return err + } + childrenByParent := make(map[string][]string) + for _, layer := range layers { + childrenByParent[layer.Parent] = append(childrenByParent[layer.Parent], layer.ID) + } + otherImagesTopLayers := make(map[string]struct{}) + for _, img := range 
images { + if img.ID != id { + otherImagesTopLayers[img.TopLayer] = struct{}{} + for _, layerID := range img.MappedTopLayers { + otherImagesTopLayers[layerID] = struct{}{} + } + } } - parent := "" - if l, err := rlstore.Get(layer); err == nil { - parent = l.Parent + if commit { + if err = s.imageStore.Delete(id); err != nil { + return err + } } - hasOtherRefs := func() bool { - layersToCheck := []string{layer} - if layer == image.TopLayer { - layersToCheck = append(layersToCheck, image.MappedTopLayers...) + layer := image.TopLayer + layersToRemoveMap := make(map[string]struct{}) + layersToRemove = append(layersToRemove, image.MappedTopLayers...) + for _, mappedTopLayer := range image.MappedTopLayers { + layersToRemoveMap[mappedTopLayer] = struct{}{} + } + for layer != "" { + if s.containerStore.Exists(layer) { + break } - for _, layer := range layersToCheck { - if childList, ok := childrenByParent[layer]; ok && childList != nil { - children := *childList - for _, child := range children { - if child != lastRemoved { + if _, used := otherImagesTopLayers[layer]; used { + break + } + parent := "" + if l, err := rlstore.Get(layer); err == nil { + parent = l.Parent + } + hasChildrenNotBeingRemoved := func() bool { + layersToCheck := []string{layer} + if layer == image.TopLayer { + layersToCheck = append(layersToCheck, image.MappedTopLayers...) + } + for _, layer := range layersToCheck { + if childList := childrenByParent[layer]; len(childList) > 0 { + for _, child := range childList { + if _, childIsSlatedForRemoval := layersToRemoveMap[child]; childIsSlatedForRemoval { + continue + } return true } } } + return false } - return false - } - if hasOtherRefs() { - break - } - lastRemoved = layer - if layer == image.TopLayer { - layersToRemove = append(layersToRemove, image.MappedTopLayers...) 
+ if hasChildrenNotBeingRemoved() { + break + } + layersToRemove = append(layersToRemove, layer) + layersToRemoveMap[layer] = struct{}{} + layer = parent } - layersToRemove = append(layersToRemove, lastRemoved) - layer = parent + } else { + return ErrNotAnImage } - } else { - return nil, ErrNotAnImage - } - if commit { - for _, layer := range layersToRemove { - if err = rlstore.Delete(layer); err != nil { - return nil, err + if commit { + for _, layer := range layersToRemove { + if err = rlstore.Delete(layer); err != nil { + return err + } } } + return nil + }); err != nil { + return nil, err } return layersToRemove, nil } func (s *store) DeleteContainer(id string) error { - rlstore, err := s.LayerStore() - if err != nil { - return err - } - ristore, err := s.ImageStore() - if err != nil { - return err - } - rcstore, err := s.ContainerStore() - if err != nil { - return err - } - - rlstore.Lock() - defer rlstore.Unlock() - if err := rlstore.ReloadIfChanged(); err != nil { - return err - } - ristore.Lock() - defer ristore.Unlock() - if err := ristore.ReloadIfChanged(); err != nil { - return err - } - rcstore.Lock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { - return err - } - - if rcstore.Exists(id) { - if container, err := rcstore.Get(id); err == nil { - errChan := make(chan error) - var wg sync.WaitGroup + return s.writeToAllStores(func(rlstore rwLayerStore) error { + if !s.containerStore.Exists(id) { + return ErrNotAContainer + } - if rlstore.Exists(container.LayerID) { - wg.Add(1) - go func() { - errChan <- rlstore.Delete(container.LayerID) - wg.Done() - }() - } - wg.Add(1) - go func() { - errChan <- rcstore.Delete(id) - wg.Done() - }() + container, err := s.containerStore.Get(id) + if err != nil { + return ErrNotAContainer + } - middleDir := s.graphDriverName + "-containers" - gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID) - wg.Add(1) - go func() { - errChan <- os.RemoveAll(gcpath) - wg.Done() - }() + errChan := make(chan error) + var wg sync.WaitGroup - rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID) + if rlstore.Exists(container.LayerID) { wg.Add(1) go func() { - errChan <- os.RemoveAll(rcpath) + errChan <- rlstore.Delete(container.LayerID) wg.Done() }() - - go func() { - wg.Wait() - close(errChan) - }() - - var errors []error - for { - select { - case err, ok := <-errChan: - if !ok { - return multierror.Append(nil, errors...).ErrorOrNil() - } - if err != nil { - errors = append(errors, err) - } - } - } } - } - return ErrNotAContainer -} - -func (s *store) Delete(id string) error { - rlstore, err := s.LayerStore() - if err != nil { - return err - } - ristore, err := s.ImageStore() - if err != nil { - return err - } - rcstore, err := s.ContainerStore() - if err != nil { - return err - } + wg.Add(1) + go func() { + errChan <- s.containerStore.Delete(id) + wg.Done() + }() + + middleDir := s.graphDriverName + "-containers" + gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID) + wg.Add(1) + go func() { + defer wg.Done() + // attempt a simple rm -rf first + err := os.RemoveAll(gcpath) + if err == nil { + errChan <- nil + return + } + // and if it fails get to the more complicated cleanup + errChan <- system.EnsureRemoveAll(gcpath) + }() + + rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID) + wg.Add(1) + go func() { + defer wg.Done() + // attempt a simple rm -rf first + err := os.RemoveAll(rcpath) + if err == nil { + errChan <- nil + return + } + // and if it fails get to the more complicated cleanup + 
errChan <- system.EnsureRemoveAll(rcpath) + }() - rlstore.Lock() - defer rlstore.Unlock() - if err := rlstore.ReloadIfChanged(); err != nil { - return err - } - ristore.Lock() - defer ristore.Unlock() - if err := ristore.ReloadIfChanged(); err != nil { - return err - } - rcstore.Lock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { - return err - } + go func() { + wg.Wait() + close(errChan) + }() - if rcstore.Exists(id) { - if container, err := rcstore.Get(id); err == nil { - if rlstore.Exists(container.LayerID) { - if err = rlstore.Delete(container.LayerID); err != nil { - return err - } - if err = rcstore.Delete(id); err != nil { - return err - } - middleDir := s.graphDriverName + "-containers" - gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID, "userdata") - if err = os.RemoveAll(gcpath); err != nil { - return err - } - rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID, "userdata") - if err = os.RemoveAll(rcpath); err != nil { - return err - } - return nil + var errors []error + for err := range errChan { + if err != nil { + errors = append(errors, err) } - return ErrNotALayer } - } - if ristore.Exists(id) { - return ristore.Delete(id) - } - if rlstore.Exists(id) { - return rlstore.Delete(id) - } - return ErrLayerUnknown -} - -func (s *store) Wipe() error { - rcstore, err := s.ContainerStore() - if err != nil { - return err - } - ristore, err := s.ImageStore() - if err != nil { - return err - } - rlstore, err := s.LayerStore() - if err != nil { - return err - } - - rlstore.Lock() - defer rlstore.Unlock() - if err := rlstore.ReloadIfChanged(); err != nil { - return err - } - ristore.Lock() - defer ristore.Unlock() - if err := ristore.ReloadIfChanged(); err != nil { - return err - } - rcstore.Lock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { - return err - } + return multierror.Append(nil, errors...).ErrorOrNil() + }) +} - if err = rcstore.Wipe(); err != nil { - return err - } - if err = ristore.Wipe(); err != nil { - return err - } - return rlstore.Wipe() +func (s *store) Delete(id string) error { + return s.writeToAllStores(func(rlstore rwLayerStore) error { + if s.containerStore.Exists(id) { + if container, err := s.containerStore.Get(id); err == nil { + if rlstore.Exists(container.LayerID) { + if err = rlstore.Delete(container.LayerID); err != nil { + return err + } + if err = s.containerStore.Delete(id); err != nil { + return err + } + middleDir := s.graphDriverName + "-containers" + gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID, "userdata") + if err = os.RemoveAll(gcpath); err != nil { + return err + } + rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID, "userdata") + if err = os.RemoveAll(rcpath); err != nil { + return err + } + return nil + } + return ErrNotALayer + } + } + if s.imageStore.Exists(id) { + return s.imageStore.Delete(id) + } + if rlstore.Exists(id) { + return rlstore.Delete(id) + } + return ErrLayerUnknown + }) +} + +func (s *store) Wipe() error { + return s.writeToAllStores(func(rlstore rwLayerStore) error { + if err := s.containerStore.Wipe(); err != nil { + return err + } + if err := s.imageStore.Wipe(); err != nil { + return err + } + return rlstore.Wipe() + }) } func (s *store) Status() ([][2]string, error) { - rlstore, err := s.LayerStore() + rlstore, err := s.getLayerStore() if err != nil { return nil, err } @@ -2639,33 +2501,24 @@ func (s *store) Version() ([][2]string, error) { } func (s *store) mount(id string, options drivers.MountOpts) 
(string, error) { - rlstore, err := s.LayerStore() - if err != nil { + // We need to make sure the home mount is present when the Mount is done, which happens by possibly reinitializing the graph driver + // in startUsingGraphDriver(). + if err := s.startUsingGraphDriver(); err != nil { return "", err } + defer s.stopUsingGraphDriver() - s.graphLock.Lock() - defer s.graphLock.Unlock() - rlstore.Lock() - defer rlstore.Unlock() - if err := rlstore.ReloadIfChanged(); err != nil { + rlstore, err := s.getLayerStoreLocked() + if err != nil { return "", err } - - modified, err := s.graphLock.Modified() - if err != nil { + if err := rlstore.startWriting(); err != nil { return "", err } + defer rlstore.stopWriting() - /* We need to make sure the home mount is present when the Mount is done. */ - if modified { - s.graphDriver = nil - s.layerStore = nil - s.graphDriver, err = s.getGraphDriver() - if err != nil { - return "", err - } - s.lastLoaded = time.Now() + if options.UidMaps != nil || options.GidMaps != nil { + options.DisableShifting = !canUseShifting(rlstore, options.UidMaps, options.GidMaps) } if rlstore.Exists(id) { @@ -2704,11 +2557,12 @@ func (s *store) Mount(id, mountLabel string) (string, error) { options.GidMaps = container.GIDMap options.Options = container.MountOpts() if !s.disableVolatile { - if v, found := container.Flags["Volatile"]; found { - options.Volatile = v.(bool) + if v, found := container.Flags[volatileFlag]; found { + if b, ok := v.(bool); ok { + options.Volatile = b + } } } - options.DisableShifting = !s.canUseShifting(container.UIDMap, container.GIDMap) } return s.mount(id, options) } @@ -2717,15 +2571,14 @@ func (s *store) Mounted(id string) (int, error) { if layerID, err := s.ContainerLayerID(id); err == nil { id = layerID } - rlstore, err := s.LayerStore() + rlstore, err := s.getLayerStore() if err != nil { return 0, err } - rlstore.RLock() - defer rlstore.Unlock() - if err := rlstore.ReloadIfChanged(); err != nil { + if err := rlstore.startReading(); err != nil { return 0, err } + defer rlstore.stopReading() return rlstore.Mounted(id) } @@ -2742,80 +2595,66 @@ func (s *store) Unmount(id string, force bool) (bool, error) { if layerID, err := s.ContainerLayerID(id); err == nil { id = layerID } - rlstore, err := s.LayerStore() - if err != nil { - return false, err - } - rlstore.Lock() - defer rlstore.Unlock() - if err := rlstore.ReloadIfChanged(); err != nil { - return false, err - } - if rlstore.Exists(id) { - return rlstore.Unmount(id, force) - } - return false, ErrLayerUnknown + var res bool + err := s.writeToLayerStore(func(rlstore rwLayerStore) error { + if rlstore.Exists(id) { + var err error + res, err = rlstore.unmount(id, force, false) + return err + } + return ErrLayerUnknown + }) + return res, err } func (s *store) Changes(from, to string) ([]archive.Change, error) { - lstore, err := s.LayerStore() - if err != nil { - return nil, err - } - lstores, err := s.ROLayerStores() - if err != nil { - return nil, err - } - for _, s := range append([]ROLayerStore{lstore}, lstores...) 
{ - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return nil, err - } + var res []archive.Change + if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { if store.Exists(to) { - return store.Changes(from, to) + var err error + res, err = store.Changes(from, to) + return true, err } + return false, nil + }); done { + return res, err } return nil, ErrLayerUnknown } func (s *store) DiffSize(from, to string) (int64, error) { - lstore, err := s.LayerStore() - if err != nil { - return -1, err - } - lstores, err := s.ROLayerStores() - if err != nil { - return -1, err - } - for _, s := range append([]ROLayerStore{lstore}, lstores...) { - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return -1, err - } + var res int64 = -1 + if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { if store.Exists(to) { - return store.DiffSize(from, to) + var err error + res, err = store.DiffSize(from, to) + return true, err } + return false, nil + }); done { + return res, err } return -1, ErrLayerUnknown } func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) { - lstore, err := s.LayerStore() - if err != nil { + // NaiveDiff could cause mounts to happen without a lock, so be safe + // and treat the .Diff operation as a Mount. + // We need to make sure the home mount is present when the Mount is done, which happens by possibly reinitializing the graph driver + // in startUsingGraphDriver(). + if err := s.startUsingGraphDriver(); err != nil { return nil, err } - lstores, err := s.ROLayerStores() + defer s.stopUsingGraphDriver() + + layerStores, err := s.allLayerStoresLocked() if err != nil { return nil, err } - for _, s := range append([]ROLayerStore{lstore}, lstores...) 
{ + + for _, s := range layerStores { store := s - store.RLock() - if err := store.ReloadIfChanged(); err != nil { + if err := store.startReading(); err != nil { return nil, err } if store.Exists(to) { @@ -2823,130 +2662,87 @@ func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, erro if rc != nil && err == nil { wrapped := ioutils.NewReadCloserWrapper(rc, func() error { err := rc.Close() - store.Unlock() + store.stopReading() return err }) return wrapped, nil } - store.Unlock() + store.stopReading() return rc, err } - store.Unlock() + store.stopReading() } return nil, ErrLayerUnknown } func (s *store) ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error { - rlstore, err := s.LayerStore() - if err != nil { - return err - } - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - if err = rlstore.Load(); err != nil { - return err + return s.writeToLayerStore(func(rlstore rwLayerStore) error { + if !rlstore.Exists(to) { + return ErrLayerUnknown } - } - if !rlstore.Exists(to) { - return ErrLayerUnknown - } - return rlstore.ApplyDiffFromStagingDirectory(to, stagingDirectory, diffOutput, options) + return rlstore.ApplyDiffFromStagingDirectory(to, stagingDirectory, diffOutput, options) + }) } func (s *store) CleanupStagingDirectory(stagingDirectory string) error { - rlstore, err := s.LayerStore() - if err != nil { - return err - } - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - if err = rlstore.Load(); err != nil { - return err - } - } - return rlstore.CleanupStagingDirectory(stagingDirectory) + return s.writeToLayerStore(func(rlstore rwLayerStore) error { + return rlstore.CleanupStagingDirectory(stagingDirectory) + }) } func (s *store) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) { - rlstore, err := s.LayerStore() - if err != nil { - return nil, err - } - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - if err = rlstore.Load(); err != nil { - return nil, err + var res *drivers.DriverWithDifferOutput + err := s.writeToLayerStore(func(rlstore rwLayerStore) error { + if to != "" && !rlstore.Exists(to) { + return ErrLayerUnknown } - } - if to != "" && !rlstore.Exists(to) { - return nil, ErrLayerUnknown - } - return rlstore.ApplyDiffWithDiffer(to, options, differ) + var err error + res, err = rlstore.ApplyDiffWithDiffer(to, options, differ) + return err + }) + return res, err } func (s *store) DifferTarget(id string) (string, error) { - rlstore, err := s.LayerStore() - if err != nil { - return "", err - } - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - if err = rlstore.Load(); err != nil { - return "", err + var res string + err := s.writeToLayerStore(func(rlstore rwLayerStore) error { + if rlstore.Exists(id) { + var err error + res, err = rlstore.DifferTarget(id) + return err } - } - if rlstore.Exists(id) { - return rlstore.DifferTarget(id) - } - return "", ErrLayerUnknown + return ErrLayerUnknown + }) + return res, err } func (s *store) ApplyDiff(to string, diff io.Reader) (int64, error) { - rlstore, err := s.LayerStore() - if err != nil { - return -1, err - } - rlstore.Lock() - defer rlstore.Unlock() - if err := rlstore.ReloadIfChanged(); err != nil { - return -1, err - } 
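// A minimal sketch of the Diff locking change above: the returned stream is
// wrapped so the store's read lock is released only when the caller closes
// it. The real code uses pkg/ioutils.NewReadCloserWrapper; this stand-in
// shows the shape of that wrapper.
package main

import (
	"fmt"
	"io"
	"strings"
)

type readCloserWrapper struct {
	io.Reader
	closer func() error
}

func (w *readCloserWrapper) Close() error { return w.closer() }

func main() {
	rc := io.NopCloser(strings.NewReader("layer diff bytes"))
	wrapped := &readCloserWrapper{Reader: rc, closer: func() error {
		err := rc.Close()
		// the real wrapper calls store.stopReading() here, after the
		// consumer has finished draining the diff
		return err
	}}
	data, _ := io.ReadAll(wrapped)
	fmt.Println(string(data))
	_ = wrapped.Close()
}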
- if rlstore.Exists(to) { - return rlstore.ApplyDiff(to, diff) - } - return -1, ErrLayerUnknown + var res int64 = -1 + err := s.writeToLayerStore(func(rlstore rwLayerStore) error { + if rlstore.Exists(to) { + var err error + res, err = rlstore.ApplyDiff(to, diff) + return err + } + return ErrLayerUnknown + }) + return res, err } -func (s *store) layersByMappedDigest(m func(ROLayerStore, digest.Digest) ([]Layer, error), d digest.Digest) ([]Layer, error) { +func (s *store) layersByMappedDigest(m func(roLayerStore, digest.Digest) ([]Layer, error), d digest.Digest) ([]Layer, error) { var layers []Layer - lstore, err := s.LayerStore() - if err != nil { - return nil, err - } - - lstores, err := s.ROLayerStores() - if err != nil { - return nil, err - } - for _, s := range append([]ROLayerStore{lstore}, lstores...) { - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return nil, err - } + if _, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { storeLayers, err := m(store, d) if err != nil { - if errors.Cause(err) != ErrLayerUnknown { - return nil, err + if !errors.Is(err, ErrLayerUnknown) { + return true, err } - continue + return false, nil } layers = append(layers, storeLayers...) + return false, nil + }); err != nil { + return nil, err } if len(layers) == 0 { return nil, ErrLayerUnknown @@ -2956,51 +2752,42 @@ func (s *store) layersByMappedDigest(m func(ROLayerStore, digest.Digest) ([]Laye func (s *store) LayersByCompressedDigest(d digest.Digest) ([]Layer, error) { if err := d.Validate(); err != nil { - return nil, errors.Wrapf(err, "error looking for compressed layers matching digest %q", d) + return nil, fmt.Errorf("looking for compressed layers matching digest %q: %w", d, err) } - return s.layersByMappedDigest(func(r ROLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByCompressedDigest(d) }, d) + return s.layersByMappedDigest(func(r roLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByCompressedDigest(d) }, d) } func (s *store) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) { if err := d.Validate(); err != nil { - return nil, errors.Wrapf(err, "error looking for layers matching digest %q", d) + return nil, fmt.Errorf("looking for layers matching digest %q: %w", d, err) } - return s.layersByMappedDigest(func(r ROLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByUncompressedDigest(d) }, d) + return s.layersByMappedDigest(func(r roLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByUncompressedDigest(d) }, d) } func (s *store) LayerSize(id string) (int64, error) { - lstore, err := s.LayerStore() - if err != nil { - return -1, err - } - lstores, err := s.ROLayerStores() - if err != nil { - return -1, err - } - for _, s := range append([]ROLayerStore{lstore}, lstores...) 
{ - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return -1, err - } + var res int64 = -1 + if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { if store.Exists(id) { - return store.Size(id) + var err error + res, err = store.Size(id) + return true, err } + return false, nil + }); done { + return res, err } return -1, ErrLayerUnknown } func (s *store) LayerParentOwners(id string) ([]int, []int, error) { - rlstore, err := s.LayerStore() + rlstore, err := s.getLayerStore() if err != nil { return nil, nil, err } - rlstore.RLock() - defer rlstore.Unlock() - if err := rlstore.ReloadIfChanged(); err != nil { + if err := rlstore.startReading(); err != nil { return nil, nil, err } + defer rlstore.stopReading() if rlstore.Exists(id) { return rlstore.ParentOwners(id) } @@ -3008,25 +2795,19 @@ func (s *store) LayerParentOwners(id string) ([]int, []int, error) { } func (s *store) ContainerParentOwners(id string) ([]int, []int, error) { - rlstore, err := s.LayerStore() - if err != nil { - return nil, nil, err - } - rcstore, err := s.ContainerStore() + rlstore, err := s.getLayerStore() if err != nil { return nil, nil, err } - rlstore.RLock() - defer rlstore.Unlock() - if err := rlstore.ReloadIfChanged(); err != nil { + if err := rlstore.startReading(); err != nil { return nil, nil, err } - rcstore.RLock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { + defer rlstore.stopReading() + if err := s.containerStore.startReading(); err != nil { return nil, nil, err } - container, err := rcstore.Get(id) + defer s.containerStore.stopReading() + container, err := s.containerStore.Get(id) if err != nil { return nil, nil, err } @@ -3037,109 +2818,74 @@ func (s *store) ContainerParentOwners(id string) ([]int, []int, error) { } func (s *store) Layers() ([]Layer, error) { - lstore, err := s.LayerStore() - if err != nil { - return nil, err - } - if err := lstore.LoadLocked(); err != nil { - return nil, err - } - layers, err := lstore.Layers() - if err != nil { - return nil, err - } - - lstores, err := s.ROLayerStores() - if err != nil { - return nil, err - } - - for _, s := range lstores { - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return nil, err - } + var layers []Layer + if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { storeLayers, err := store.Layers() if err != nil { - return nil, err + return true, err } layers = append(layers, storeLayers...) + return false, nil + }); done { + return nil, err } return layers, nil } func (s *store) Images() ([]Image, error) { var images []Image - istore, err := s.ImageStore() - if err != nil { - return nil, err - } - - istores, err := s.ROImageStores() - if err != nil { - return nil, err - } - for _, s := range append([]ROImageStore{istore}, istores...) { - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return nil, err - } + if _, err := s.readAllImageStores(func(store roImageStore) (bool, error) { storeImages, err := store.Images() if err != nil { - return nil, err + return true, err } images = append(images, storeImages...) 
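// A minimal sketch of the (done, err) iteration helper that LayerSize,
// Changes, and DiffSize now route through; roStore and fakeStore are
// stand-ins for the real roLayerStore interface and its locking.
package main

import "fmt"

type roStore interface {
	Exists(id string) bool
	Size(id string) (int64, error)
}

type fakeStore map[string]int64

func (f fakeStore) Exists(id string) bool         { _, ok := f[id]; return ok }
func (f fakeStore) Size(id string) (int64, error) { return f[id], nil }

// readAll calls fn on every store in order; fn returns done=true to stop
// early. The real helper brackets each call with startReading/stopReading.
func readAll(stores []roStore, fn func(roStore) (bool, error)) (bool, error) {
	for _, s := range stores {
		if done, err := fn(s); done || err != nil {
			return done, err
		}
	}
	return false, nil
}

func main() {
	stores := []roStore{fakeStore{}, fakeStore{"abc": 42}}
	var res int64 = -1
	done, err := readAll(stores, func(s roStore) (bool, error) {
		if s.Exists("abc") {
			var e error
			res, e = s.Size("abc")
			return true, e
		}
		return false, nil
	})
	fmt.Println(done, err, res) // true <nil> 42
}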
+ return false, nil + }); err != nil { + return nil, err } return images, nil } func (s *store) Containers() ([]Container, error) { - rcstore, err := s.ContainerStore() - if err != nil { - return nil, err - } - - rcstore.RLock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { + if err := s.containerStore.startReading(); err != nil { return nil, err } + defer s.containerStore.stopReading() - return rcstore.Containers() + return s.containerStore.Containers() } func (s *store) Layer(id string) (*Layer, error) { - lstore, err := s.LayerStore() - if err != nil { - return nil, err - } - lstores, err := s.ROLayerStores() - if err != nil { - return nil, err - } - for _, s := range append([]ROLayerStore{lstore}, lstores...) { - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return nil, err - } + var res *Layer + if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { layer, err := store.Get(id) if err == nil { - return layer, nil + res = layer + return true, nil } + return false, nil + }); done { + return res, err } return nil, ErrLayerUnknown } func (s *store) LookupAdditionalLayer(d digest.Digest, imageref string) (AdditionalLayer, error) { - adriver, ok := s.graphDriver.(drivers.AdditionalLayerStoreDriver) - if !ok { - return nil, ErrLayerUnknown + var adriver drivers.AdditionalLayerStoreDriver + if err := func() error { // A scope for defer + if err := s.startUsingGraphDriver(); err != nil { + return err + } + defer s.stopUsingGraphDriver() + a, ok := s.graphDriver.(drivers.AdditionalLayerStoreDriver) + if !ok { + return ErrLayerUnknown + } + adriver = a + return nil + }(); err != nil { + return nil, err } al, err := adriver.LookupAdditionalLayer(d, imageref) @@ -3176,29 +2922,23 @@ func (al *additionalLayer) CompressedSize() int64 { } func (al *additionalLayer) PutAs(id, parent string, names []string) (*Layer, error) { - rlstore, err := al.s.LayerStore() + rlstore, rlstores, err := al.s.bothLayerStoreKinds() if err != nil { return nil, err } - rlstore.Lock() - defer rlstore.Unlock() - if err := rlstore.ReloadIfChanged(); err != nil { - return nil, err - } - rlstores, err := al.s.ROLayerStores() - if err != nil { + if err := rlstore.startWriting(); err != nil { return nil, err } + defer rlstore.stopWriting() var parentLayer *Layer if parent != "" { - for _, lstore := range append([]ROLayerStore{rlstore}, rlstores...) { + for _, lstore := range append([]roLayerStore{rlstore}, rlstores...) { if lstore != rlstore { - lstore.RLock() - defer lstore.Unlock() - if err := lstore.ReloadIfChanged(); err != nil { + if err := lstore.startReading(); err != nil { return nil, err } + defer lstore.stopReading() } parentLayer, err = lstore.Get(parent) if err == nil { @@ -3218,117 +2958,75 @@ func (al *additionalLayer) Release() { } func (s *store) Image(id string) (*Image, error) { - istore, err := s.ImageStore() - if err != nil { - return nil, err - } - istores, err := s.ROImageStores() - if err != nil { - return nil, err - } - for _, s := range append([]ROImageStore{istore}, istores...) 
{ - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return nil, err - } + var res *Image + if done, err := s.readAllImageStores(func(store roImageStore) (bool, error) { image, err := store.Get(id) if err == nil { - return image, nil + res = image + return true, nil } + return false, nil + }); done { + return res, err } - return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } func (s *store) ImagesByTopLayer(id string) ([]*Image, error) { - images := []*Image{} layer, err := s.Layer(id) if err != nil { return nil, err } - istore, err := s.ImageStore() - if err != nil { - return nil, err - } - - istores, err := s.ROImageStores() - if err != nil { - return nil, err - } - for _, s := range append([]ROImageStore{istore}, istores...) { - store := s - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return nil, err - } + images := []*Image{} + if _, err := s.readAllImageStores(func(store roImageStore) (bool, error) { imageList, err := store.Images() if err != nil { - return nil, err + return true, err } for _, image := range imageList { + image := image if image.TopLayer == layer.ID || stringutils.InSlice(image.MappedTopLayers, layer.ID) { images = append(images, &image) } } + return false, nil + }); err != nil { + return nil, err } return images, nil } func (s *store) ImagesByDigest(d digest.Digest) ([]*Image, error) { images := []*Image{} - - istore, err := s.ImageStore() - if err != nil { - return nil, err - } - - istores, err := s.ROImageStores() - if err != nil { - return nil, err - } - for _, store := range append([]ROImageStore{istore}, istores...) { - store.RLock() - defer store.Unlock() - if err := store.ReloadIfChanged(); err != nil { - return nil, err - } + if _, err := s.readAllImageStores(func(store roImageStore) (bool, error) { imageList, err := store.ByDigest(d) - if err != nil && errors.Cause(err) != ErrImageUnknown { - return nil, err + if err != nil && !errors.Is(err, ErrImageUnknown) { + return true, err } images = append(images, imageList...) 
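// A minimal sketch of why the ImagesByTopLayer hunk above redeclares the
// range variable (image := image) before taking its address: up to Go 1.21
// the loop variable is a single reused slot, so every stored pointer would
// otherwise end up aliasing the last element.
package main

import "fmt"

type image struct{ ID string }

func main() {
	list := []image{{"a"}, {"b"}, {"c"}}
	var out []*image
	for _, img := range list {
		img := img // drop this copy (pre-Go 1.22) and all three pointers print "c"
		out = append(out, &img)
	}
	for _, p := range out {
		fmt.Print(p.ID, " ") // a b c
	}
	fmt.Println()
}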
+ return false, nil + }); err != nil { + return nil, err } return images, nil } func (s *store) Container(id string) (*Container, error) { - rcstore, err := s.ContainerStore() - if err != nil { - return nil, err - } - rcstore.RLock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { + if err := s.containerStore.startReading(); err != nil { return nil, err } + defer s.containerStore.stopReading() - return rcstore.Get(id) + return s.containerStore.Get(id) } func (s *store) ContainerLayerID(id string) (string, error) { - rcstore, err := s.ContainerStore() - if err != nil { - return "", err - } - rcstore.RLock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { + if err := s.containerStore.startReading(); err != nil { return "", err } - container, err := rcstore.Get(id) + defer s.containerStore.stopReading() + container, err := s.containerStore.Get(id) if err != nil { return "", err } @@ -3340,16 +3038,11 @@ func (s *store) ContainerByLayer(id string) (*Container, error) { if err != nil { return nil, err } - rcstore, err := s.ContainerStore() - if err != nil { - return nil, err - } - rcstore.RLock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { + if err := s.containerStore.startReading(); err != nil { return nil, err } - containerList, err := rcstore.Containers() + defer s.containerStore.stopReading() + containerList, err := s.containerStore.Containers() if err != nil { return nil, err } @@ -3363,17 +3056,12 @@ func (s *store) ContainerByLayer(id string) (*Container, error) { } func (s *store) ContainerDirectory(id string) (string, error) { - rcstore, err := s.ContainerStore() - if err != nil { - return "", err - } - rcstore.RLock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { + if err := s.containerStore.startReading(); err != nil { return "", err } + defer s.containerStore.stopReading() - id, err = rcstore.Lookup(id) + id, err := s.containerStore.Lookup(id) if err != nil { return "", err } @@ -3387,18 +3075,12 @@ func (s *store) ContainerDirectory(id string) (string, error) { } func (s *store) ContainerRunDirectory(id string) (string, error) { - rcstore, err := s.ContainerStore() - if err != nil { - return "", err - } - - rcstore.RLock() - defer rcstore.Unlock() - if err := rcstore.ReloadIfChanged(); err != nil { + if err := s.containerStore.startReading(); err != nil { return "", err } + defer s.containerStore.stopReading() - id, err = rcstore.Lookup(id) + id, err := s.containerStore.Lookup(id) if err != nil { return "", err } @@ -3428,7 +3110,7 @@ func (s *store) FromContainerDirectory(id, file string) ([]byte, error) { if err != nil { return nil, err } - return ioutil.ReadFile(filepath.Join(dir, file)) + return os.ReadFile(filepath.Join(dir, file)) } func (s *store) SetContainerRunDirectoryFile(id, file string, data []byte) error { @@ -3448,26 +3130,25 @@ func (s *store) FromContainerRunDirectory(id, file string) ([]byte, error) { if err != nil { return nil, err } - return ioutil.ReadFile(filepath.Join(dir, file)) + return os.ReadFile(filepath.Join(dir, file)) } func (s *store) Shutdown(force bool) ([]string, error) { mounted := []string{} - modified := false - rlstore, err := s.LayerStore() - if err != nil { + if err := s.startUsingGraphDriver(); err != nil { return mounted, err } + defer s.stopUsingGraphDriver() - s.graphLock.Lock() - defer s.graphLock.Unlock() - - rlstore.Lock() - defer rlstore.Unlock() - if err := rlstore.ReloadIfChanged(); err != nil { + rlstore, err 
:= s.getLayerStoreLocked() + if err != nil { + return mounted, err + } + if err := rlstore.startWriting(); err != nil { return nil, err } + defer rlstore.stopWriting() layers, err := rlstore.Layers() if err != nil { @@ -3479,28 +3160,35 @@ func (s *store) Shutdown(force bool) ([]string, error) { } mounted = append(mounted, layer.ID) if force { - for layer.MountCount > 0 { - _, err2 := rlstore.Unmount(layer.ID, force) + for { + _, err2 := rlstore.unmount(layer.ID, force, true) + if err2 == ErrLayerNotMounted { + break + } if err2 != nil { if err == nil { err = err2 } break } - modified = true } } } if len(mounted) > 0 && err == nil { - err = errors.Wrap(ErrLayerUsedByContainer, "A layer is mounted") + err = fmt.Errorf("a layer is mounted: %w", ErrLayerUsedByContainer) } if err == nil { err = s.graphDriver.Cleanup() - s.graphLock.Touch() - modified = true - } - if modified { - rlstore.Touch() + // We don’t retain the lastWrite value, and treat this update as if someone else did the .Cleanup(), + // so that we reload after a .Shutdown() the same way other processes would. + // Shutdown() is basically an error path, so reliability is more important than performance. + if _, err2 := s.graphLock.RecordWrite(); err2 != nil { + if err == nil { + err = err2 + } else { + err = fmt.Errorf("(graphLock.RecordWrite failed: %v) %w", err2, err) + } + } } return mounted, err } @@ -3588,9 +3276,10 @@ const AutoUserNsMaxSize = 65536 // creating a user namespace. const RootAutoUserNsUser = "containers" -// SetDefaultConfigFilePath sets the default configuration to the specified path +// SetDefaultConfigFilePath sets the default configuration to the specified path, and loads the file. +// Deprecated: Use types.SetDefaultConfigFilePath, which can return an error. func SetDefaultConfigFilePath(path string) { - types.SetDefaultConfigFilePath(path) + _ = types.SetDefaultConfigFilePath(path) } // DefaultConfigFile returns the path to the storage config file used @@ -3600,13 +3289,17 @@ func DefaultConfigFile(rootless bool) (string, error) { // ReloadConfigurationFile parses the specified configuration file and overrides // the configuration in storeOptions. +// Deprecated: Use types.ReloadConfigurationFile, which can return an error. func ReloadConfigurationFile(configFile string, storeOptions *types.StoreOptions) { - types.ReloadConfigurationFile(configFile, storeOptions) + _ = types.ReloadConfigurationFile(configFile, storeOptions) } // GetDefaultMountOptions returns the default mountoptions defined in container/storage func GetDefaultMountOptions() ([]string, error) { - defaultStoreOptions := types.Options() + defaultStoreOptions, err := types.Options() + if err != nil { + return nil, err + } return GetMountOptions(defaultStoreOptions.GraphDriverName, defaultStoreOptions.GraphDriverOptions) } @@ -3640,3 +3333,20 @@ func (s *store) Free() { } } } + +// Tries to clean up old unreferenced container leftovers. 
returns the first error +// but continues as far as it can +func (s *store) GarbageCollect() error { + firstErr := s.writeToContainerStore(func() error { + return s.containerStore.GarbageCollect() + }) + + moreErr := s.writeToLayerStore(func(rlstore rwLayerStore) error { + return rlstore.GarbageCollect() + }) + if firstErr == nil { + firstErr = moreErr + } + + return firstErr +} diff --git a/vendor/github.com/containers/storage/types/default_override_test.conf b/vendor/github.com/containers/storage/types/default_override_test.conf new file mode 100644 index 000000000..caa537ba9 --- /dev/null +++ b/vendor/github.com/containers/storage/types/default_override_test.conf @@ -0,0 +1,11 @@ +[storage] + +# Default Storage Driver +driver = "" + +# Primary Read/Write location of container storage +graphroot = "environment_override_graphroot" + +# Storage path for rootless users +# +rootless_storage_path = "environment_override_rootless_storage_path" diff --git a/vendor/github.com/containers/storage/types/errors.go b/vendor/github.com/containers/storage/types/errors.go index d920d12eb..dc6ee3e0c 100644 --- a/vendor/github.com/containers/storage/types/errors.go +++ b/vendor/github.com/containers/storage/types/errors.go @@ -55,4 +55,8 @@ var ( ErrStoreIsReadOnly = errors.New("called a write method on a read-only store") // ErrNotSupported is returned when the requested functionality is not supported. ErrNotSupported = errors.New("not supported") + // ErrInvalidMappings is returned when the specified mappings are invalid. + ErrInvalidMappings = errors.New("invalid mappings specified") + // ErrNoAvailableIDs is returned when there are not enough unused IDS within the user namespace. + ErrNoAvailableIDs = errors.New("not enough unused IDs in user namespace") ) diff --git a/vendor/github.com/containers/storage/types/idmappings.go b/vendor/github.com/containers/storage/types/idmappings.go index 82824ae2b..aabdf7a81 100644 --- a/vendor/github.com/containers/storage/types/idmappings.go +++ b/vendor/github.com/containers/storage/types/idmappings.go @@ -5,7 +5,6 @@ import ( "os" "github.com/containers/storage/pkg/idtools" - "github.com/pkg/errors" ) // AutoUserNsOptions defines how to automatically create a user namespace. @@ -77,18 +76,18 @@ func ParseIDMapping(UIDMapSlice, GIDMapSlice []string, subUIDMap, subGIDMap stri if subUIDMap != "" && subGIDMap != "" { mappings, err := idtools.NewIDMappings(subUIDMap, subGIDMap) if err != nil { - return nil, errors.Wrapf(err, "failed to create NewIDMappings for uidmap=%s gidmap=%s", subUIDMap, subGIDMap) + return nil, fmt.Errorf("failed to create NewIDMappings for uidmap=%s gidmap=%s: %w", subUIDMap, subGIDMap, err) } options.UIDMap = mappings.UIDs() options.GIDMap = mappings.GIDs() } parsedUIDMap, err := idtools.ParseIDMap(UIDMapSlice, "UID") if err != nil { - return nil, errors.Wrapf(err, "failed to create ParseUIDMap UID=%s", UIDMapSlice) + return nil, fmt.Errorf("failed to create ParseUIDMap UID=%s: %w", UIDMapSlice, err) } parsedGIDMap, err := idtools.ParseIDMap(GIDMapSlice, "GID") if err != nil { - return nil, errors.Wrapf(err, "failed to create ParseGIDMap GID=%s", UIDMapSlice) + return nil, fmt.Errorf("failed to create ParseGIDMap GID=%s: %w", UIDMapSlice, err) } options.UIDMap = append(options.UIDMap, parsedUIDMap...) options.GIDMap = append(options.GIDMap, parsedGIDMap...) 
diff --git a/vendor/github.com/containers/storage/types/options.go b/vendor/github.com/containers/storage/types/options.go index c0e3ea637..01f4e5a79 100644 --- a/vendor/github.com/containers/storage/types/options.go +++ b/vendor/github.com/containers/storage/types/options.go @@ -1,47 +1,111 @@ package types import ( + "errors" "fmt" - "io/ioutil" "os" - "os/exec" "path/filepath" "strings" "sync" "time" "github.com/BurntSushi/toml" - "github.com/containers/storage/drivers/overlay" cfg "github.com/containers/storage/pkg/config" "github.com/containers/storage/pkg/idtools" "github.com/sirupsen/logrus" ) // TOML-friendly explicit tables used for conversions. -type tomlConfig struct { +type TomlConfig struct { Storage struct { - Driver string `toml:"driver"` - RunRoot string `toml:"runroot"` - GraphRoot string `toml:"graphroot"` - RootlessStoragePath string `toml:"rootless_storage_path"` - Options cfg.OptionsConfig `toml:"options"` + Driver string `toml:"driver,omitempty"` + DriverPriority []string `toml:"driver_priority,omitempty"` + RunRoot string `toml:"runroot,omitempty"` + GraphRoot string `toml:"graphroot,omitempty"` + RootlessStoragePath string `toml:"rootless_storage_path,omitempty"` + TransientStore bool `toml:"transient_store,omitempty"` + Options cfg.OptionsConfig `toml:"options,omitempty"` } `toml:"storage"` } -// defaultConfigFile path to the system wide storage.conf file +const ( + overlayDriver = "overlay" + overlay2 = "overlay2" + storageConfEnv = "CONTAINERS_STORAGE_CONF" +) + var ( - defaultConfigFile = "/etc/containers/storage.conf" - defaultConfigFileSet = false + defaultStoreOptionsOnce sync.Once + loadDefaultStoreOptionsErr error + once sync.Once + storeOptions StoreOptions + storeError error + defaultConfigFileSet bool + // defaultConfigFile path to the system wide storage.conf file + defaultConfigFile = SystemConfigFile // DefaultStoreOptions is a reasonable default set of options. defaultStoreOptions StoreOptions ) -func init() { - defaultStoreOptions.RunRoot = "/run/containers/storage" - defaultStoreOptions.GraphRoot = "/var/lib/containers/storage" +func loadDefaultStoreOptions() { defaultStoreOptions.GraphDriverName = "" - ReloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions) + setDefaults := func() { + // reload could set values to empty for run and graph root if config does not contains anything + if defaultStoreOptions.RunRoot == "" { + defaultStoreOptions.RunRoot = defaultRunRoot + } + if defaultStoreOptions.GraphRoot == "" { + defaultStoreOptions.GraphRoot = defaultGraphRoot + } + } + setDefaults() + + if path, ok := os.LookupEnv(storageConfEnv); ok { + defaultOverrideConfigFile = path + if err := ReloadConfigurationFileIfNeeded(path, &defaultStoreOptions); err != nil { + loadDefaultStoreOptionsErr = err + return + } + setDefaults() + return + } + + if path, ok := os.LookupEnv("XDG_CONFIG_HOME"); ok { + homeConfigFile := filepath.Join(path, "containers", "storage.conf") + if _, err := os.Stat(homeConfigFile); err == nil { + // user storage.conf in XDG_CONFIG_HOME if it exists + defaultOverrideConfigFile = homeConfigFile + } else { + if !os.IsNotExist(err) { + loadDefaultStoreOptionsErr = err + return + } + } + } + + _, err := os.Stat(defaultOverrideConfigFile) + if err == nil { + // The DefaultConfigFile(rootless) function returns the path + // of the used storage.conf file, by returning defaultConfigFile + // If override exists containers/storage uses it by default. 
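// A minimal sketch of the configuration lookup order that
// loadDefaultStoreOptions above implements: $CONTAINERS_STORAGE_CONF wins,
// then $XDG_CONFIG_HOME/containers/storage.conf, then the per-OS override
// file, then the system default. The two literal paths are the Linux
// values; other platforms define their own constants.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func resolveConfigFile() string {
	if path, ok := os.LookupEnv("CONTAINERS_STORAGE_CONF"); ok {
		return path
	}
	if xdg, ok := os.LookupEnv("XDG_CONFIG_HOME"); ok {
		home := filepath.Join(xdg, "containers", "storage.conf")
		if _, err := os.Stat(home); err == nil {
			return home
		}
	}
	if _, err := os.Stat("/etc/containers/storage.conf"); err == nil {
		return "/etc/containers/storage.conf" // defaultOverrideConfigFile on Linux
	}
	return "/usr/share/containers/storage.conf" // SystemConfigFile on Linux
}

func main() {
	fmt.Println(resolveConfigFile())
}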
+ defaultConfigFile = defaultOverrideConfigFile
+ if err := ReloadConfigurationFileIfNeeded(defaultOverrideConfigFile, &defaultStoreOptions); err != nil {
+ loadDefaultStoreOptionsErr = err
+ return
+ }
+ setDefaults()
+ return
+ }
+
+ if !os.IsNotExist(err) {
+ logrus.Warningf("Attempting to use %s, %v", defaultConfigFile, err)
+ }
+ if err := ReloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions); err != nil && !errors.Is(err, os.ErrNotExist) {
+ loadDefaultStoreOptionsErr = err
+ return
+ }
+ setDefaults()
}

// defaultStoreOptionsIsolated is an internal implementation detail of DefaultStoreOptions to allow testing.
@@ -52,6 +116,10 @@ func defaultStoreOptionsIsolated(rootless bool, rootlessUID int, storageConf str
defaultRootlessGraphRoot string
err error
)
+ defaultStoreOptionsOnce.Do(loadDefaultStoreOptions)
+ if loadDefaultStoreOptionsErr != nil {
+ return StoreOptions{}, loadDefaultStoreOptionsErr
+ }
storageOpts := defaultStoreOptions
if rootless && rootlessUID != 0 {
storageOpts, err = getRootlessStorageOpts(rootlessUID, storageOpts)
@@ -109,8 +177,8 @@ func defaultStoreOptionsIsolated(rootless bool, rootlessUID int, storageConf str
return storageOpts, nil
}

-// DefaultStoreOptions returns the default storage ops for containers
-func DefaultStoreOptions(rootless bool, rootlessUID int) (StoreOptions, error) {
+// loadStoreOptions returns the default storage options for containers
+func loadStoreOptions(rootless bool, rootlessUID int) (StoreOptions, error) {
storageConf, err := DefaultConfigFile(rootless && rootlessUID != 0)
if err != nil {
return defaultStoreOptions, err
@@ -118,6 +186,21 @@ func DefaultStoreOptions(rootless bool, rootlessUID int) (StoreOptions, error) {
return defaultStoreOptionsIsolated(rootless, rootlessUID, storageConf)
}

+// UpdateStoreOptions should be called iff the container engine received a SIGHUP,
+// otherwise use DefaultStoreOptions
+func UpdateStoreOptions(rootless bool, rootlessUID int) (StoreOptions, error) {
+ storeOptions, storeError = loadStoreOptions(rootless, rootlessUID)
+ return storeOptions, storeError
+}
+
+// DefaultStoreOptions returns the default storage options for containers
+func DefaultStoreOptions(rootless bool, rootlessUID int) (StoreOptions, error) {
+ once.Do(func() {
+ storeOptions, storeError = loadStoreOptions(rootless, rootlessUID)
+ })
+ return storeOptions, storeError
+}
+
// StoreOptions is used for passing initialization options to GetStore(), for
// initializing a Store object and the underlying storage that it controls.
type StoreOptions struct {
@@ -131,10 +214,16 @@ type StoreOptions struct {
// RootlessStoragePath is the storage path for rootless users
// default $HOME/.local/share/containers/storage
RootlessStoragePath string `toml:"rootless_storage_path"`
- // GraphDriverName is the underlying storage driver that we'll be
- // using. It only needs to be specified the first time a Store is
- // initialized for a given RunRoot and GraphRoot.
+ // If the driver is not specified, the best suited driver will be picked
+ // either from GraphDriverPriority, if specified, or from the platform
+ // dependent priority list (in that order).
GraphDriverName string `json:"driver,omitempty"`
+ // GraphDriverPriority is a list of storage drivers that will be tried
+ // to initialize the Store for a given RunRoot and GraphRoot unless a
+ // GraphDriverName is set.
+ // This list can be used to define a custom order in which the drivers
+ // will be tried.
+ GraphDriverPriority []string `json:"driver-priority,omitempty"` // GraphDriverOptions are driver-specific options. GraphDriverOptions []string `json:"driver-options,omitempty"` // UIDMap and GIDMap are used for setting up a container's root filesystem @@ -153,6 +242,8 @@ type StoreOptions struct { PullOptions map[string]string `toml:"pull_options"` // DisableVolatile doesn't allow volatile mounts when it is set. DisableVolatile bool `json:"disable-volatile,omitempty"` + // If transient, don't persist containers over boot (stores db in runroot) + TransientStore bool `json:"transient_store,omitempty"` } // isRootlessDriver returns true if the given storage driver is valid for containers running as non root @@ -169,13 +260,13 @@ func isRootlessDriver(driver string) bool { // getRootlessStorageOpts returns the storage opts for containers running as non root func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOptions, error) { var opts StoreOptions - const overlayDriver = "overlay" dataDir, rootlessRuntime, err := getRootlessDirInfo(rootlessUID) if err != nil { return opts, err } opts.RunRoot = rootlessRuntime + opts.PullOptions = systemOpts.PullOptions if systemOpts.RootlessStoragePath != "" { opts.GraphRoot, err = expandEnvPath(systemOpts.RootlessStoragePath, rootlessUID) if err != nil { @@ -191,30 +282,29 @@ func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOpti if driver := os.Getenv("STORAGE_DRIVER"); driver != "" { opts.GraphDriverName = driver } - if opts.GraphDriverName == "" || opts.GraphDriverName == overlayDriver { - supported, err := overlay.SupportsNativeOverlay(opts.GraphRoot, rootlessRuntime) - if err != nil { - return opts, err - } - if supported { - opts.GraphDriverName = overlayDriver - } else { - if path, err := exec.LookPath("fuse-overlayfs"); err == nil { - opts.GraphDriverName = overlayDriver - opts.GraphDriverOptions = []string{fmt.Sprintf("overlay.mount_program=%s", path)} - } - } - if opts.GraphDriverName == overlayDriver { - for _, o := range systemOpts.GraphDriverOptions { - if strings.Contains(o, "ignore_chown_errors") { - opts.GraphDriverOptions = append(opts.GraphDriverOptions, o) - break - } + if opts.GraphDriverName == overlay2 { + logrus.Warnf("Switching default driver from overlay2 to the equivalent overlay driver") + opts.GraphDriverName = overlayDriver + } + + // If the configuration file was explicitly set, then copy all the options + // present. 
+ if defaultConfigFileSet { + opts.GraphDriverOptions = systemOpts.GraphDriverOptions + } else if opts.GraphDriverName == overlayDriver { + for _, o := range systemOpts.GraphDriverOptions { + if strings.Contains(o, "ignore_chown_errors") { + opts.GraphDriverOptions = append(opts.GraphDriverOptions, o) + break } } } if opts.GraphDriverName == "" { - opts.GraphDriverName = "vfs" + if len(systemOpts.GraphDriverPriority) == 0 { + opts.GraphDriverName = "vfs" + } else { + opts.GraphDriverPriority = systemOpts.GraphDriverPriority + } } if os.Getenv("STORAGE_OPTS") != "" { @@ -238,56 +328,57 @@ var prevReloadConfig = struct { }{} // SetDefaultConfigFilePath sets the default configuration to the specified path -func SetDefaultConfigFilePath(path string) { +func SetDefaultConfigFilePath(path string) error { defaultConfigFile = path defaultConfigFileSet = true - ReloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions) + return ReloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions) } -func ReloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptions) { +func ReloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptions) error { prevReloadConfig.mutex.Lock() defer prevReloadConfig.mutex.Unlock() fi, err := os.Stat(configFile) if err != nil { - if !os.IsNotExist(err) { - fmt.Printf("Failed to read %s %v\n", configFile, err.Error()) - } - return + return err } mtime := fi.ModTime() if prevReloadConfig.storeOptions != nil && prevReloadConfig.mod == mtime && prevReloadConfig.configFile == configFile { *storeOptions = *prevReloadConfig.storeOptions - return + return nil } - ReloadConfigurationFile(configFile, storeOptions) + if err := ReloadConfigurationFile(configFile, storeOptions); err != nil { + return err + } - prevReloadConfig.storeOptions = storeOptions + cOptions := *storeOptions + prevReloadConfig.storeOptions = &cOptions prevReloadConfig.mod = mtime prevReloadConfig.configFile = configFile + return nil } // ReloadConfigurationFile parses the specified configuration file and overrides // the configuration in storeOptions. 
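// A minimal sketch of the mtime gate in ReloadConfigurationFileIfNeeded
// above: reparse the file only when the path or modification time changed,
// and cache a private copy of the result so a caller mutating its options
// cannot poison the cache (that copy is exactly what the hunk adds).
package main

import (
	"fmt"
	"os"
	"sync"
	"time"
)

type options struct{ Driver string }

var cache struct {
	mu   sync.Mutex
	path string
	mod  time.Time
	opts *options
}

func loadIfNeeded(path string, parse func(string) (*options, error)) (*options, error) {
	cache.mu.Lock()
	defer cache.mu.Unlock()
	fi, err := os.Stat(path)
	if err != nil {
		return nil, err
	}
	if cache.opts != nil && cache.path == path && cache.mod.Equal(fi.ModTime()) {
		copied := *cache.opts // serve a copy, never the cached value itself
		return &copied, nil
	}
	opts, err := parse(path)
	if err != nil {
		return nil, err
	}
	copied := *opts
	cache.path, cache.mod, cache.opts = path, fi.ModTime(), &copied
	return opts, nil
}

func main() {
	f, _ := os.CreateTemp("", "storage-*.conf")
	defer os.Remove(f.Name())
	parses := 0
	parse := func(string) (*options, error) { parses++; return &options{Driver: "overlay"}, nil }
	loadIfNeeded(f.Name(), parse)
	loadIfNeeded(f.Name(), parse) // second call is served from the cache
	fmt.Println(parses)           // 1
}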
-func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) { - data, err := ioutil.ReadFile(configFile) - if err != nil { +func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) error { + config := new(TomlConfig) + + meta, err := toml.DecodeFile(configFile, &config) + if err == nil { + keys := meta.Undecoded() + if len(keys) > 0 { + logrus.Warningf("Failed to decode the keys %q from %q", keys, configFile) + } + } else { if !os.IsNotExist(err) { - fmt.Printf("Failed to read %s %v\n", configFile, err.Error()) - return + logrus.Warningf("Failed to read %s %v\n", configFile, err.Error()) + return err } } - config := new(tomlConfig) - - if _, err := toml.Decode(string(data), config); err != nil { - fmt.Printf("Failed to parse %s %v\n", configFile, err.Error()) - return - } - - // Clear storeOptions of previos settings + // Clear storeOptions of previous settings *storeOptions = StoreOptions{} if config.Storage.Driver != "" { storeOptions.GraphDriverName = config.Storage.Driver @@ -296,8 +387,13 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) { config.Storage.Driver = os.Getenv("STORAGE_DRIVER") storeOptions.GraphDriverName = config.Storage.Driver } - if storeOptions.GraphDriverName == "" { - logrus.Errorf("The storage 'driver' option must be set in %s, guarantee proper operation.", configFile) + if storeOptions.GraphDriverName == overlay2 { + logrus.Warnf("Switching default driver from overlay2 to the equivalent overlay driver") + storeOptions.GraphDriverName = overlayDriver + } + storeOptions.GraphDriverPriority = config.Storage.DriverPriority + if storeOptions.GraphDriverName == "" && len(storeOptions.GraphDriverPriority) == 0 { + logrus.Warnf("The storage 'driver' option should be set in %s. 
A driver was picked automatically.", configFile) } if config.Storage.RunRoot != "" { storeOptions.RunRoot = config.Storage.RunRoot @@ -341,8 +437,8 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) { if config.Storage.Options.RemapUser != "" && config.Storage.Options.RemapGroup != "" { mappings, err := idtools.NewIDMappings(config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup) if err != nil { - fmt.Printf("Error initializing ID mappings for %s:%s %v\n", config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup, err) - return + logrus.Warningf("Error initializing ID mappings for %s:%s %v\n", config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup, err) + return err } storeOptions.UIDMap = mappings.UIDs() storeOptions.GIDMap = mappings.GIDs() @@ -350,16 +446,15 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) { uidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapUIDs}, "remap-uids") if err != nil { - fmt.Print(err) - } else { - storeOptions.UIDMap = uidmap + return err } gidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapGIDs}, "remap-gids") if err != nil { - fmt.Print(err) - } else { - storeOptions.GIDMap = gidmap + return err } + + storeOptions.UIDMap = uidmap + storeOptions.GIDMap = gidmap storeOptions.RootAutoNsUser = config.Storage.Options.RootAutoUsernsUser if config.Storage.Options.AutoUsernsMinSize > 0 { storeOptions.AutoNsMinSize = config.Storage.Options.AutoUsernsMinSize @@ -372,6 +467,7 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) { } storeOptions.DisableVolatile = config.Storage.Options.DisableVolatile + storeOptions.TransientStore = config.Storage.TransientStore storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, cfg.GetGraphDriverOptions(storeOptions.GraphDriverName, config.Storage.Options)...) 
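// A minimal sketch of the stricter TOML handling ReloadConfigurationFile
// adopts above: BurntSushi/toml's DecodeFile returns MetaData, and
// meta.Undecoded() lists any keys the target struct has no field for,
// which get logged instead of being silently dropped.
package main

import (
	"fmt"
	"os"

	"github.com/BurntSushi/toml"
)

type config struct {
	Storage struct {
		Driver string `toml:"driver"`
	} `toml:"storage"`
}

func main() {
	f, _ := os.CreateTemp("", "storage-*.conf")
	defer os.Remove(f.Name())
	f.WriteString("[storage]\ndriver = \"overlay\"\ntypo_key = 1\n")
	f.Close()

	var cfg config
	meta, err := toml.DecodeFile(f.Name(), &cfg)
	if err != nil {
		panic(err)
	}
	if keys := meta.Undecoded(); len(keys) > 0 {
		fmt.Printf("Failed to decode the keys %q from %q\n", keys, f.Name())
	}
	fmt.Println(cfg.Storage.Driver) // overlay
}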
@@ -381,8 +477,46 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) { if len(storeOptions.GraphDriverOptions) == 1 && storeOptions.GraphDriverOptions[0] == "" { storeOptions.GraphDriverOptions = nil } + return nil +} + +func Options() (StoreOptions, error) { + defaultStoreOptionsOnce.Do(loadDefaultStoreOptions) + return defaultStoreOptions, loadDefaultStoreOptionsErr +} + +// Save overwrites the tomlConfig in storage.conf with the given conf +func Save(conf TomlConfig, rootless bool) error { + configFile, err := DefaultConfigFile(rootless) + if err != nil { + return err + } + + if err = os.Remove(configFile); !os.IsNotExist(err) && err != nil { + return err + } + + f, err := os.Create(configFile) + if err != nil { + return err + } + + return toml.NewEncoder(f).Encode(conf) } -func Options() StoreOptions { - return defaultStoreOptions +// StorageConfig is used to retrieve the storage.conf toml in order to overwrite it +func StorageConfig(rootless bool) (*TomlConfig, error) { + config := new(TomlConfig) + + configFile, err := DefaultConfigFile(rootless) + if err != nil { + return nil, err + } + + _, err = toml.DecodeFile(configFile, &config) + if err != nil { + return nil, err + } + + return config, nil } diff --git a/vendor/github.com/containers/storage/types/options_darwin.go b/vendor/github.com/containers/storage/types/options_darwin.go new file mode 100644 index 000000000..eed1a3d94 --- /dev/null +++ b/vendor/github.com/containers/storage/types/options_darwin.go @@ -0,0 +1,13 @@ +package types + +const ( + // these are default path for run and graph root for rootful users + // for rootless path is constructed via getRootlessStorageOpts + defaultRunRoot string = "/run/containers/storage" + defaultGraphRoot string = "/var/lib/containers/storage" + SystemConfigFile = "/usr/share/containers/storage.conf" +) + +var ( + defaultOverrideConfigFile = "/etc/containers/storage.conf" +) diff --git a/vendor/github.com/containers/storage/types/options_freebsd.go b/vendor/github.com/containers/storage/types/options_freebsd.go new file mode 100644 index 000000000..afb7ec6b4 --- /dev/null +++ b/vendor/github.com/containers/storage/types/options_freebsd.go @@ -0,0 +1,14 @@ +package types + +const ( + // these are default path for run and graph root for rootful users + // for rootless path is constructed via getRootlessStorageOpts + defaultRunRoot string = "/var/run/containers/storage" + defaultGraphRoot string = "/var/db/containers/storage" + SystemConfigFile = "/usr/local/share/containers/storage.conf" +) + +// defaultConfigFile path to the system wide storage.conf file +var ( + defaultOverrideConfigFile = "/usr/local/etc/containers/storage.conf" +) diff --git a/vendor/github.com/containers/storage/types/options_linux.go b/vendor/github.com/containers/storage/types/options_linux.go new file mode 100644 index 000000000..d44aaf76a --- /dev/null +++ b/vendor/github.com/containers/storage/types/options_linux.go @@ -0,0 +1,14 @@ +package types + +const ( + // these are default path for run and graph root for rootful users + // for rootless path is constructed via getRootlessStorageOpts + defaultRunRoot string = "/run/containers/storage" + defaultGraphRoot string = "/var/lib/containers/storage" + SystemConfigFile = "/usr/share/containers/storage.conf" +) + +// defaultConfigFile path to the system wide storage.conf file +var ( + defaultOverrideConfigFile = "/etc/containers/storage.conf" +) diff --git a/vendor/github.com/containers/storage/types/options_windows.go 
b/vendor/github.com/containers/storage/types/options_windows.go
new file mode 100644
index 000000000..d44aaf76a
--- /dev/null
+++ b/vendor/github.com/containers/storage/types/options_windows.go
@@ -0,0 +1,14 @@
+package types
+
+const (
+ // these are default path for run and graph root for rootful users
+ // for rootless path is constructed via getRootlessStorageOpts
+ defaultRunRoot string = "/run/containers/storage"
+ defaultGraphRoot string = "/var/lib/containers/storage"
+ SystemConfigFile = "/usr/share/containers/storage.conf"
+)
+
+// defaultConfigFile path to the system wide storage.conf file
+var (
+ defaultOverrideConfigFile = "/etc/containers/storage.conf"
+)
diff --git a/vendor/github.com/containers/storage/types/storage_broken.conf b/vendor/github.com/containers/storage/types/storage_broken.conf
new file mode 100644
index 000000000..3bca1d978
--- /dev/null
+++ b/vendor/github.com/containers/storage/types/storage_broken.conf
@@ -0,0 +1,18 @@
+# This file is a TEST configuration file for all tools
+# that use the containers/storage library.
+# See man 5 containers-storage.conf for more information
+# The "container storage" table contains all of the server options.
+foo = "bar"
+
+[storage]
+
+# Default Storage Driver
+driver = ""
+
+# Temporary storage location
+runroot = "/run/containers/test"
+
+[storage.options]
+# Primary Read/Write location of container storage
+graphroot = "/var/lib/containers/storage"
+
diff --git a/vendor/github.com/containers/storage/types/utils.go b/vendor/github.com/containers/storage/types/utils.go
index d2dca7b68..72c38f861 100644
--- a/vendor/github.com/containers/storage/types/utils.go
+++ b/vendor/github.com/containers/storage/types/utils.go
@@ -1,8 +1,8 @@ package types
import (
+ "errors"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"strconv"
@@ -10,7 +10,6 @@ import (
"github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/system"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -22,7 +21,7 @@ func GetRootlessRuntimeDir(rootlessUID int) (string, error) {
}
path = filepath.Join(path, "containers")
if err := os.MkdirAll(path, 0700); err != nil {
- return "", errors.Wrapf(err, "unable to make rootless runtime")
+ return "", fmt.Errorf("unable to make rootless runtime: %w", err)
}
return path, nil
}
@@ -75,7 +74,7 @@ func getRootlessRuntimeDirIsolated(env rootlessRuntimeDirEnvironment) (string, e
return runtimeDir, nil
}
- initCommand, err := ioutil.ReadFile(env.getProcCommandFile())
+ initCommand, err := os.ReadFile(env.getProcCommandFile())
if err != nil || string(initCommand) == "systemd" {
runUserDir := env.getRunUserDir()
if isRootlessRuntimeDirOwner(runUserDir, env) {
@@ -87,7 +86,7 @@ func getRootlessRuntimeDirIsolated(env rootlessRuntimeDirEnvironment) (string, e
if tmpPerUserDir != "" {
if _, err := env.systemLstat(tmpPerUserDir); os.IsNotExist(err) {
if err := os.Mkdir(tmpPerUserDir, 0700); err != nil {
- logrus.Errorf("failed to create temp directory for user: %v", err)
+ logrus.Errorf("Failed to create temp directory for user: %v", err)
} else {
return tmpPerUserDir, nil
}
@@ -132,7 +131,7 @@ func getRootlessDirInfo(rootlessUID int) (string, string, error) {
home := homedir.Get()
if home == "" {
- return "", "", errors.Wrapf(err, "neither XDG_DATA_HOME nor HOME was set non-empty")
+ return "", "", fmt.Errorf("neither XDG_DATA_HOME nor HOME was set non-empty: %w", err)
}
// runc doesn't like symlinks in the rootfs path, and at least
// on CoreOS /home is a symlink to /var/home, so resolve any symlink.
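// A minimal sketch of a read-modify-write round trip with the StorageConfig
// and Save helpers added above (the flow is illustrative; both calls resolve
// the storage.conf path via DefaultConfigFile for the given rootless mode).
package main

import (
	"fmt"

	"github.com/containers/storage/types"
)

func main() {
	conf, err := types.StorageConfig(true) // load the current rootless storage.conf
	if err != nil {
		panic(err)
	}
	conf.Storage.Driver = "overlay" // adjust a field...
	if err := types.Save(*conf, true); err != nil { // ...and write the file back
		panic(err)
	}
	fmt.Println("driver is now", conf.Storage.Driver)
}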
@@ -155,12 +154,28 @@ func getRootlessUID() int { } func expandEnvPath(path string, rootlessUID int) (string, error) { + var err error path = strings.Replace(path, "$UID", strconv.Itoa(rootlessUID), -1) - return filepath.Clean(os.ExpandEnv(path)), nil + path = os.ExpandEnv(path) + newpath, err := filepath.EvalSymlinks(path) + if err != nil { + newpath = filepath.Clean(path) + } + return newpath, nil } func DefaultConfigFile(rootless bool) (string, error) { - if defaultConfigFileSet || !rootless { + if defaultConfigFileSet { + return defaultConfigFile, nil + } + + if path, ok := os.LookupEnv(storageConfEnv); ok { + return path, nil + } + if !rootless { + if _, err := os.Stat(defaultOverrideConfigFile); err == nil { + return defaultOverrideConfigFile, nil + } return defaultConfigFile, nil } @@ -181,7 +196,7 @@ func reloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptio fi, err := os.Stat(configFile) if err != nil { if !os.IsNotExist(err) { - fmt.Printf("Failed to read %s %v\n", configFile, err.Error()) + logrus.Warningf("Failed to read %s %v\n", configFile, err.Error()) } return } diff --git a/vendor/github.com/containers/storage/userns.go b/vendor/github.com/containers/storage/userns.go index 3ada41f73..9c27b5d39 100644 --- a/vendor/github.com/containers/storage/userns.go +++ b/vendor/github.com/containers/storage/userns.go @@ -1,6 +1,7 @@ package storage import ( + "fmt" "os" "os/user" "path/filepath" @@ -11,7 +12,6 @@ import ( "github.com/containers/storage/pkg/unshare" "github.com/containers/storage/types" libcontainerUser "github.com/opencontainers/runc/libcontainer/user" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -43,7 +43,7 @@ func getAdditionalSubIDs(username string) (*idSet, *idSet, error) { } mappings, err := idtools.NewIDMappings(username, username) if err != nil { - logrus.Errorf("cannot find mappings for user %q: %v", username, err) + logrus.Errorf("Cannot find mappings for user %q: %v", username, err) } else { uids = getHostIDs(mappings.UIDs()) gids = getHostIDs(mappings.GIDs()) @@ -78,6 +78,10 @@ func (s *store) getAvailableIDs() (*idSet, *idSet, error) { return u, g, nil } +// nobodyUser returns the UID and GID of the "nobody" user. Hardcode its value +// for simplicity. +const nobodyUser = 65534 + // parseMountedFiles returns the maximum UID and GID found in the /etc/passwd and // /etc/group files. func parseMountedFiles(containerMount, passwdFile, groupFile string) uint32 { @@ -98,10 +102,10 @@ func parseMountedFiles(containerMount, passwdFile, groupFile string) uint32 { if u.Name == "nobody" { continue } - if u.Uid > size { + if u.Uid > size && u.Uid != nobodyUser { size = u.Uid } - if u.Gid > size { + if u.Gid > size && u.Gid != nobodyUser { size = u.Gid } } @@ -113,7 +117,7 @@ func parseMountedFiles(containerMount, passwdFile, groupFile string) uint32 { if g.Name == "nobody" { continue } - if g.Gid > size { + if g.Gid > size && g.Gid != nobodyUser { size = g.Gid } } @@ -123,16 +127,9 @@ func parseMountedFiles(containerMount, passwdFile, groupFile string) uint32 { } // getMaxSizeFromImage returns the maximum ID used by the specified image. -// The layer stores must be already locked. -func (s *store) getMaxSizeFromImage(id string, image *Image, passwdFile, groupFile string) (uint32, error) { - lstore, err := s.LayerStore() - if err != nil { - return 0, err - } - lstores, err := s.ROLayerStores() - if err != nil { - return 0, err - } +// On entry, rlstore must be locked for writing, and lstores must be locked for reading. 
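// A minimal sketch of the reworked expandEnvPath in the hunk above: expand
// $UID and other environment variables, then prefer the symlink-resolved
// path (runc dislikes symlinks in rootfs paths), falling back to Clean when
// the target does not exist yet.
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"strings"
)

func expandEnvPath(path string, rootlessUID int) string {
	path = strings.ReplaceAll(path, "$UID", strconv.Itoa(rootlessUID))
	path = os.ExpandEnv(path)
	if resolved, err := filepath.EvalSymlinks(path); err == nil {
		return resolved // symlinks resolved, e.g. /home -> /var/home
	}
	return filepath.Clean(path)
}

func main() {
	os.Setenv("XDG_DATA_HOME", "/home/user/.local/share")
	fmt.Println(expandEnvPath("$XDG_DATA_HOME/containers/storage/$UID", 1000))
}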
+func (s *store) getMaxSizeFromImage(image *Image, rlstore rwLayerStore, lstores []roLayerStore, passwdFile, groupFile string) (_ uint32, retErr error) { + layerStores := append([]roLayerStore{rlstore}, lstores...) size := uint32(0) @@ -140,7 +137,7 @@ func (s *store) getMaxSizeFromImage(id string, image *Image, passwdFile, groupFi layerName := image.TopLayer outer: for { - for _, ls := range append([]ROLayerStore{lstore}, lstores...) { + for _, ls := range layerStores { layer, err := ls.Get(layerName) if err != nil { continue @@ -164,12 +161,7 @@ outer: } continue outer } - return 0, errors.Errorf("cannot find layer %q", layerName) - } - - rlstore, err := s.LayerStore() - if err != nil { - return 0, err + return 0, fmt.Errorf("cannot find layer %q", layerName) } layerOptions := &LayerOptions{ @@ -183,11 +175,19 @@ outer: // We need to create a temporary layer so we can mount it and lookup the // maximum IDs used. - clayer, err := rlstore.Create(id, topLayer, nil, "", nil, layerOptions, false) + clayer, err := rlstore.Create("", topLayer, nil, "", nil, layerOptions, false) if err != nil { return 0, err } - defer rlstore.Delete(clayer.ID) + defer func() { + if err2 := rlstore.Delete(clayer.ID); err2 != nil { + if retErr == nil { + retErr = fmt.Errorf("deleting temporary layer %#v: %w", clayer.ID, err2) + } else { + logrus.Errorf("Error deleting temporary layer %#v: %v", clayer.ID, err2) + } + } + }() mountOptions := drivers.MountOpts{ MountLabel: "", @@ -200,7 +200,15 @@ outer: if err != nil { return 0, err } - defer rlstore.Unmount(clayer.ID, true) + defer func() { + if _, err2 := rlstore.unmount(clayer.ID, true, false); err2 != nil { + if retErr == nil { + retErr = fmt.Errorf("unmounting temporary layer %#v: %w", clayer.ID, err2) + } else { + logrus.Errorf("Error unmounting temporary layer %#v: %v", clayer.ID, err2) + } + } + }() userFilesSize := parseMountedFiles(mountpoint, passwdFile, groupFile) if userFilesSize > size { @@ -211,7 +219,8 @@ outer: } // getAutoUserNS creates an automatic user namespace -func (s *store) getAutoUserNS(id string, options *types.AutoUserNsOptions, image *Image) ([]idtools.IDMap, []idtools.IDMap, error) { +// If image != nil, then on entry rlstore must be locked for writing, and lstores must be locked for reading. +func (s *store) getAutoUserNS(options *types.AutoUserNsOptions, image *Image, rlstore rwLayerStore, lstores []roLayerStore) ([]idtools.IDMap, []idtools.IDMap, error) { requestedSize := uint32(0) initialSize := uint32(1) if options.Size > 0 { @@ -223,10 +232,10 @@ func (s *store) getAutoUserNS(id string, options *types.AutoUserNsOptions, image availableUIDs, availableGIDs, err := s.getAvailableIDs() if err != nil { - return nil, nil, errors.Wrapf(err, "cannot read mappings") + return nil, nil, fmt.Errorf("cannot read mappings: %w", err) } - // Look every container that is using a user namespace and store + // Look at every container that is using a user namespace and store // the intervals that are already used.
containers, err := s.Containers() if err != nil { @@ -250,7 +259,7 @@ func (s *store) getAutoUserNS(id string, options *types.AutoUserNsOptions, image size = s.autoNsMinSize } if image != nil { - sizeFromImage, err := s.getMaxSizeFromImage(id, image, options.PasswdFile, options.GroupFile) + sizeFromImage, err := s.getMaxSizeFromImage(image, rlstore, lstores, options.PasswdFile, options.GroupFile) if err != nil { return nil, nil, err } @@ -259,7 +268,7 @@ func (s *store) getAutoUserNS(id string, options *types.AutoUserNsOptions, image } } if s.autoNsMaxSize > 0 && size > s.autoNsMaxSize { - return nil, nil, errors.Errorf("the container needs a user namespace with size %q that is bigger than the maximum value allowed with userns=auto %q", size, s.autoNsMaxSize) + return nil, nil, fmt.Errorf("the container needs a user namespace with size %v that is bigger than the maximum value allowed with userns=auto %v", size, s.autoNsMaxSize) } } diff --git a/vendor/github.com/containers/storage/utils.go b/vendor/github.com/containers/storage/utils.go index 80d56041b..7f9e92b93 100644 --- a/vendor/github.com/containers/storage/utils.go +++ b/vendor/github.com/containers/storage/utils.go @@ -2,6 +2,7 @@ package storage import ( "fmt" + "unicode" "github.com/containers/storage/types" ) @@ -16,12 +17,12 @@ func GetRootlessRuntimeDir(rootlessUID int) (string, error) { return types.GetRootlessRuntimeDir(rootlessUID) } -// DefaultStoreOptionsAutoDetectUID returns the default storage ops for containers +// DefaultStoreOptionsAutoDetectUID returns the default storage options for containers func DefaultStoreOptionsAutoDetectUID() (types.StoreOptions, error) { return types.DefaultStoreOptionsAutoDetectUID() } -// DefaultStoreOptions returns the default storage ops for containers +// DefaultStoreOptions returns the default storage options for containers func DefaultStoreOptions(rootless bool, rootlessUID int) (types.StoreOptions, error) { return types.DefaultStoreOptions(rootless, rootlessUID) } @@ -40,3 +41,47 @@ func validateMountOptions(mountOptions []string) error { } return nil } + +func applyNameOperation(oldNames []string, opParameters []string, op updateNameOperation) ([]string, error) { + var result []string + switch op { + case setNames: + // ignore all old names and just return new names + result = opParameters + case removeNames: + // remove given names from old names + result = make([]string, 0, len(oldNames)) + for _, name := range oldNames { + // only keep names in final result which do not intersect with input names + // basically `result = oldNames - opParameters` + nameShouldBeRemoved := false + for _, opName := range opParameters { + if name == opName { + nameShouldBeRemoved = true + } + } + if !nameShouldBeRemoved { + result = append(result, name) + } + } + case addNames: + result = make([]string, 0, len(opParameters)+len(oldNames)) + result = append(result, opParameters...) + result = append(result, oldNames...) 
+ default: + return result, errInvalidUpdateNameOperation + } + return dedupeNames(result), nil +} + +func nameLooksLikeID(name string) bool { + if len(name) != 64 { + return false + } + for _, c := range name { + if !unicode.Is(unicode.ASCII_Hex_Digit, c) { + return false + } + } + return true +} diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go b/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go index e843a4613..cff5af1a6 100644 --- a/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go +++ b/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go @@ -111,14 +111,13 @@ type Conn struct { } } -// New establishes a connection to any available bus and authenticates. -// Callers should call Close() when done with the connection. -// Deprecated: use NewWithContext instead +// Deprecated: use NewWithContext instead. func New() (*Conn, error) { return NewWithContext(context.Background()) } -// NewWithContext same as New with context +// NewWithContext establishes a connection to any available bus and authenticates. +// Callers should call Close() when done with the connection. func NewWithContext(ctx context.Context) (*Conn, error) { conn, err := NewSystemConnectionContext(ctx) if err != nil && os.Geteuid() == 0 { @@ -127,44 +126,41 @@ func NewWithContext(ctx context.Context) (*Conn, error) { return conn, err } -// NewSystemConnection establishes a connection to the system bus and authenticates. -// Callers should call Close() when done with the connection -// Deprecated: use NewSystemConnectionContext instead +// Deprecated: use NewSystemConnectionContext instead. func NewSystemConnection() (*Conn, error) { return NewSystemConnectionContext(context.Background()) } -// NewSystemConnectionContext same as NewSystemConnection with context +// NewSystemConnectionContext establishes a connection to the system bus and authenticates. +// Callers should call Close() when done with the connection. func NewSystemConnectionContext(ctx context.Context) (*Conn, error) { return NewConnection(func() (*dbus.Conn, error) { return dbusAuthHelloConnection(ctx, dbus.SystemBusPrivate) }) } -// NewUserConnection establishes a connection to the session bus and -// authenticates. This can be used to connect to systemd user instances. -// Callers should call Close() when done with the connection. -// Deprecated: use NewUserConnectionContext instead +// Deprecated: use NewUserConnectionContext instead. func NewUserConnection() (*Conn, error) { return NewUserConnectionContext(context.Background()) } -// NewUserConnectionContext same as NewUserConnection with context +// NewUserConnectionContext establishes a connection to the session bus and +// authenticates. This can be used to connect to systemd user instances. +// Callers should call Close() when done with the connection. func NewUserConnectionContext(ctx context.Context) (*Conn, error) { return NewConnection(func() (*dbus.Conn, error) { return dbusAuthHelloConnection(ctx, dbus.SessionBusPrivate) }) } -// NewSystemdConnection establishes a private, direct connection to systemd. -// This can be used for communicating with systemd without a dbus daemon. -// Callers should call Close() when done with the connection. -// Deprecated: use NewSystemdConnectionContext instead +// Deprecated: use NewSystemdConnectionContext instead. 
func NewSystemdConnection() (*Conn, error) { return NewSystemdConnectionContext(context.Background()) } -// NewSystemdConnectionContext same as NewSystemdConnection with context +// NewSystemdConnectionContext establishes a private, direct connection to systemd. +// This can be used for communicating with systemd without a dbus daemon. +// Callers should call Close() when done with the connection. func NewSystemdConnectionContext(ctx context.Context) (*Conn, error) { return NewConnection(func() (*dbus.Conn, error) { // We skip Hello when talking directly to systemd. @@ -174,7 +170,7 @@ func NewSystemdConnectionContext(ctx context.Context) (*Conn, error) { }) } -// Close closes an established connection +// Close closes an established connection. func (c *Conn) Close() { c.sysconn.Close() c.sigconn.Close() @@ -217,7 +213,7 @@ func NewConnection(dialBus func() (*dbus.Conn, error)) (*Conn, error) { // GetManagerProperty returns the value of a property on the org.freedesktop.systemd1.Manager // interface. The value is returned in its string representation, as defined at -// https://developer.gnome.org/glib/unstable/gvariant-text.html +// https://developer.gnome.org/glib/unstable/gvariant-text.html. func (c *Conn) GetManagerProperty(prop string) (string, error) { variant, err := c.sysobj.GetProperty("org.freedesktop.systemd1.Manager." + prop) if err != nil { diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go b/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go index 01879ba15..fa04afc70 100644 --- a/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go +++ b/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go @@ -73,7 +73,12 @@ func (c *Conn) startJob(ctx context.Context, ch chan<- string, job string, args return jobID, nil } -// StartUnit enqueues a start job and depending jobs, if any (unless otherwise +// Deprecated: use StartUnitContext instead. +func (c *Conn) StartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.StartUnitContext(context.Background(), name, mode, ch) +} + +// StartUnitContext enqueues a start job and depending jobs, if any (unless otherwise // specified by the mode string). // // Takes the unit to activate, plus a mode string. The mode needs to be one of @@ -103,137 +108,124 @@ func (c *Conn) startJob(ctx context.Context, ch chan<- string, job string, args // should not be considered authoritative. // // If an error does occur, it will be returned to the user alongside a job ID of 0. -// Deprecated: use StartUnitContext instead -func (c *Conn) StartUnit(name string, mode string, ch chan<- string) (int, error) { - return c.StartUnitContext(context.Background(), name, mode, ch) -} - -// StartUnitContext same as StartUnit with context func (c *Conn) StartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.StartUnit", name, mode) } -// StopUnit is similar to StartUnit but stops the specified unit rather -// than starting it. -// Deprecated: use StopUnitContext instead +// Deprecated: use StopUnitContext instead. func (c *Conn) StopUnit(name string, mode string, ch chan<- string) (int, error) { return c.StopUnitContext(context.Background(), name, mode, ch) } -// StopUnitContext same as StopUnit with context +// StopUnitContext is similar to StartUnitContext, but stops the specified unit +// rather than starting it. 
func (c *Conn) StopUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.StopUnit", name, mode) } -// ReloadUnit reloads a unit. Reloading is done only if the unit is already running and fails otherwise. -// Deprecated: use ReloadUnitContext instead +// Deprecated: use ReloadUnitContext instead. func (c *Conn) ReloadUnit(name string, mode string, ch chan<- string) (int, error) { return c.ReloadUnitContext(context.Background(), name, mode, ch) } -// ReloadUnitContext same as ReloadUnit with context +// ReloadUnitContext reloads a unit. Reloading is done only if the unit +// is already running, and fails otherwise. func (c *Conn) ReloadUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.ReloadUnit", name, mode) } -// RestartUnit restarts a service. If a service is restarted that isn't -// running it will be started. -// Deprecated: use RestartUnitContext instead +// Deprecated: use RestartUnitContext instead. func (c *Conn) RestartUnit(name string, mode string, ch chan<- string) (int, error) { return c.RestartUnitContext(context.Background(), name, mode, ch) } -// RestartUnitContext same as RestartUnit with context +// RestartUnitContext restarts a service. If a service is restarted that isn't +// running, it will be started. func (c *Conn) RestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.RestartUnit", name, mode) } -// TryRestartUnit is like RestartUnit, except that a service that isn't running -// is not affected by the restart. -// Deprecated: use TryRestartUnitContext instead +// Deprecated: use TryRestartUnitContext instead. func (c *Conn) TryRestartUnit(name string, mode string, ch chan<- string) (int, error) { return c.TryRestartUnitContext(context.Background(), name, mode, ch) } -// TryRestartUnitContext same as TryRestartUnit with context +// TryRestartUnitContext is like RestartUnitContext, except that a service that +// isn't running is not affected by the restart. func (c *Conn) TryRestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode) } -// ReloadOrRestartUnit attempts a reload if the unit supports it and use a restart -// otherwise. -// Deprecated: use ReloadOrRestartUnitContext instead +// Deprecated: use ReloadOrRestartUnitContext instead. func (c *Conn) ReloadOrRestartUnit(name string, mode string, ch chan<- string) (int, error) { return c.ReloadOrRestartUnitContext(context.Background(), name, mode, ch) } -// ReloadOrRestartUnitContext same as ReloadOrRestartUnit with context +// ReloadOrRestartUnitContext attempts a reload if the unit supports it and uses +// a restart otherwise. func (c *Conn) ReloadOrRestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode) } -// ReloadOrTryRestartUnit attempts a reload if the unit supports it and use a "Try" -// flavored restart otherwise. -// Deprecated: use ReloadOrTryRestartUnitContext instead +// Deprecated: use ReloadOrTryRestartUnitContext instead.
func (c *Conn) ReloadOrTryRestartUnit(name string, mode string, ch chan<- string) (int, error) { return c.ReloadOrTryRestartUnitContext(context.Background(), name, mode, ch) } -// ReloadOrTryRestartUnitContext same as ReloadOrTryRestartUnit with context +// ReloadOrTryRestartUnitContext attempts a reload if the unit supports it, +// and uses a "Try" flavored restart otherwise. func (c *Conn) ReloadOrTryRestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode) } -// StartTransientUnit() may be used to create and start a transient unit, which -// will be released as soon as it is not running or referenced anymore or the -// system is rebooted. name is the unit name including suffix, and must be -// unique. mode is the same as in StartUnit(), properties contains properties -// of the unit. -// Deprecated: use StartTransientUnitContext instead +// Deprecated: use StartTransientUnitContext instead. func (c *Conn) StartTransientUnit(name string, mode string, properties []Property, ch chan<- string) (int, error) { return c.StartTransientUnitContext(context.Background(), name, mode, properties, ch) } -// StartTransientUnitContext same as StartTransientUnit with context +// StartTransientUnitContext may be used to create and start a transient unit, which +// will be released as soon as it is not running or referenced anymore or the +// system is rebooted. name is the unit name including suffix, and must be +// unique. mode is the same as in StartUnitContext, properties contains properties +// of the unit. func (c *Conn) StartTransientUnitContext(ctx context.Context, name string, mode string, properties []Property, ch chan<- string) (int, error) { return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0)) } -// KillUnit takes the unit name and a UNIX signal number to send. All of the unit's -// processes are killed. -// Deprecated: use KillUnitContext instead +// Deprecated: use KillUnitContext instead. func (c *Conn) KillUnit(name string, signal int32) { c.KillUnitContext(context.Background(), name, signal) } -// KillUnitContext same as KillUnit with context +// KillUnitContext takes the unit name and a UNIX signal number to send. +// All of the unit's processes are killed. func (c *Conn) KillUnitContext(ctx context.Context, name string, signal int32) { c.KillUnitWithTarget(ctx, name, All, signal) } -// KillUnitWithTarget is like KillUnitContext, but allows you to specify which process in the unit to send the signal to +// KillUnitWithTarget is like KillUnitContext, but allows you to specify which +// process in the unit to send the signal to. func (c *Conn) KillUnitWithTarget(ctx context.Context, name string, target Who, signal int32) error { return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.KillUnit", 0, name, string(target), signal).Store() } -// ResetFailedUnit resets the "failed" state of a specific unit. -// Deprecated: use ResetFailedUnitContext instead +// Deprecated: use ResetFailedUnitContext instead. func (c *Conn) ResetFailedUnit(name string) error { return c.ResetFailedUnitContext(context.Background(), name) } -// ResetFailedUnitContext same as ResetFailedUnit with context +// ResetFailedUnitContext resets the "failed" state of a specific unit.
func (c *Conn) ResetFailedUnitContext(ctx context.Context, name string) error { return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ResetFailedUnit", 0, name).Store() } -// SystemState returns the systemd state. Equivalent to `systemctl is-system-running`. -// Deprecated: use SystemStateContext instead +// Deprecated: use SystemStateContext instead. func (c *Conn) SystemState() (*Property, error) { return c.SystemStateContext(context.Background()) } -// SystemStateContext same as SystemState with context +// SystemStateContext returns the systemd state. Equivalent to +// systemctl is-system-running. func (c *Conn) SystemStateContext(ctx context.Context) (*Property, error) { var err error var prop dbus.Variant @@ -247,7 +239,7 @@ func (c *Conn) SystemStateContext(ctx context.Context) (*Property, error) { return &Property{Name: "SystemState", Value: prop}, nil } -// getProperties takes the unit path and returns all of its dbus object properties, for the given dbus interface +// getProperties takes the unit path and returns all of its dbus object properties, for the given dbus interface. func (c *Conn) getProperties(ctx context.Context, path dbus.ObjectPath, dbusInterface string) (map[string]interface{}, error) { var err error var props map[string]dbus.Variant @@ -270,36 +262,36 @@ func (c *Conn) getProperties(ctx context.Context, path dbus.ObjectPath, dbusInte return out, nil } -// GetUnitProperties takes the (unescaped) unit name and returns all of its dbus object properties. -// Deprecated: use GetUnitPropertiesContext instead +// Deprecated: use GetUnitPropertiesContext instead. func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) { return c.GetUnitPropertiesContext(context.Background(), unit) } -// GetUnitPropertiesContext same as GetUnitPropertiesContext with context +// GetUnitPropertiesContext takes the (unescaped) unit name and returns all of +// its dbus object properties. func (c *Conn) GetUnitPropertiesContext(ctx context.Context, unit string) (map[string]interface{}, error) { path := unitPath(unit) return c.getProperties(ctx, path, "org.freedesktop.systemd1.Unit") } -// GetUnitPathProperties takes the (escaped) unit path and returns all of its dbus object properties. -// Deprecated: use GetUnitPathPropertiesContext instead +// Deprecated: use GetUnitPathPropertiesContext instead. func (c *Conn) GetUnitPathProperties(path dbus.ObjectPath) (map[string]interface{}, error) { return c.GetUnitPathPropertiesContext(context.Background(), path) } -// GetUnitPathPropertiesContext same as GetUnitPathProperties with context +// GetUnitPathPropertiesContext takes the (escaped) unit path and returns all +// of its dbus object properties. func (c *Conn) GetUnitPathPropertiesContext(ctx context.Context, path dbus.ObjectPath) (map[string]interface{}, error) { return c.getProperties(ctx, path, "org.freedesktop.systemd1.Unit") } -// GetAllProperties takes the (unescaped) unit name and returns all of its dbus object properties. -// Deprecated: use GetAllPropertiesContext instead +// Deprecated: use GetAllPropertiesContext instead. func (c *Conn) GetAllProperties(unit string) (map[string]interface{}, error) { return c.GetAllPropertiesContext(context.Background(), unit) } -// GetAllPropertiesContext same as GetAllProperties with context +// GetAllPropertiesContext takes the (unescaped) unit name and returns all of +// its dbus object properties. 
func (c *Conn) GetAllPropertiesContext(ctx context.Context, unit string) (map[string]interface{}, error) { path := unitPath(unit) return c.getProperties(ctx, path, "") @@ -323,64 +315,63 @@ func (c *Conn) getProperty(ctx context.Context, unit string, dbusInterface strin return &Property{Name: propertyName, Value: prop}, nil } -// Deprecated: use GetUnitPropertyContext instead +// Deprecated: use GetUnitPropertyContext instead. func (c *Conn) GetUnitProperty(unit string, propertyName string) (*Property, error) { return c.GetUnitPropertyContext(context.Background(), unit, propertyName) } -// GetUnitPropertyContext same as GetUnitProperty with context +// GetUnitPropertyContext takes an (unescaped) unit name, and a property name, +// and returns the property value. func (c *Conn) GetUnitPropertyContext(ctx context.Context, unit string, propertyName string) (*Property, error) { return c.getProperty(ctx, unit, "org.freedesktop.systemd1.Unit", propertyName) } -// GetServiceProperty returns property for given service name and property name -// Deprecated: use GetServicePropertyContext instead +// Deprecated: use GetServicePropertyContext instead. func (c *Conn) GetServiceProperty(service string, propertyName string) (*Property, error) { return c.GetServicePropertyContext(context.Background(), service, propertyName) } -// GetServicePropertyContext same as GetServiceProperty with context +// GetServicePropertyContext returns the property for the given service name and property name. func (c *Conn) GetServicePropertyContext(ctx context.Context, service string, propertyName string) (*Property, error) { return c.getProperty(ctx, service, "org.freedesktop.systemd1.Service", propertyName) } -// GetUnitTypeProperties returns the extra properties for a unit, specific to the unit type. -// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope -// return "dbus.Error: Unknown interface" if the unitType is not the correct type of the unit -// Deprecated: use GetUnitTypePropertiesContext instead +// Deprecated: use GetUnitTypePropertiesContext instead. func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]interface{}, error) { return c.GetUnitTypePropertiesContext(context.Background(), unit, unitType) } -// GetUnitTypePropertiesContext same as GetUnitTypeProperties with context +// GetUnitTypePropertiesContext returns the extra properties for a unit, specific to the unit type. +// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope. +// Returns "dbus.Error: Unknown interface" error if the unitType is not the correct type of the unit. func (c *Conn) GetUnitTypePropertiesContext(ctx context.Context, unit string, unitType string) (map[string]interface{}, error) { path := unitPath(unit) return c.getProperties(ctx, path, "org.freedesktop.systemd1."+unitType) } -// SetUnitProperties() may be used to modify certain unit properties at runtime. +// Deprecated: use SetUnitPropertiesContext instead. +func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error { + return c.SetUnitPropertiesContext(context.Background(), name, runtime, properties...) +} + +// SetUnitPropertiesContext may be used to modify certain unit properties at runtime. // Not all properties may be changed at runtime, but many resource management // settings (primarily those in systemd.cgroup(5)) may.
The changes are applied // instantly, and stored on disk for future boots, unless runtime is true, in which // case the settings only apply until the next reboot. name is the name of the unit // to modify. properties are the settings to set, encoded as an array of property // name and value pairs. -// Deprecated: use SetUnitPropertiesContext instead -func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error { - return c.SetUnitPropertiesContext(context.Background(), name, runtime, properties...) -} - -// SetUnitPropertiesContext same as SetUnitProperties with context func (c *Conn) SetUnitPropertiesContext(ctx context.Context, name string, runtime bool, properties ...Property) error { return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store() } -// Deprecated: use GetUnitTypePropertyContext instead +// Deprecated: use GetUnitTypePropertyContext instead. func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) { return c.GetUnitTypePropertyContext(context.Background(), unit, unitType, propertyName) } -// GetUnitTypePropertyContext same as GetUnitTypeProperty with context +// GetUnitTypePropertyContext takes a property name, a unit name, and a unit type, +// and returns a property value. For valid values of unitType, see GetUnitTypePropertiesContext. func (c *Conn) GetUnitTypePropertyContext(ctx context.Context, unit string, unitType string, propertyName string) (*Property, error) { return c.getProperty(ctx, unit, "org.freedesktop.systemd1."+unitType, propertyName) } @@ -426,58 +417,55 @@ func (c *Conn) listUnitsInternal(f storeFunc) ([]UnitStatus, error) { return status, nil } -// ListUnits returns an array with all currently loaded units. Note that -// units may be known by multiple names at the same time, and hence there might -// be more unit names loaded than actual units behind them. -// Also note that a unit is only loaded if it is active and/or enabled. -// Units that are both disabled and inactive will thus not be returned. -// Deprecated: use ListUnitsContext instead +// Deprecated: use ListUnitsContext instead. func (c *Conn) ListUnits() ([]UnitStatus, error) { return c.ListUnitsContext(context.Background()) } -// ListUnitsContext same as ListUnits with context +// ListUnitsContext returns an array with all currently loaded units. Note that +// units may be known by multiple names at the same time, and hence there might +// be more unit names loaded than actual units behind them. +// Also note that a unit is only loaded if it is active and/or enabled. +// Units that are both disabled and inactive will thus not be returned. func (c *Conn) ListUnitsContext(ctx context.Context) ([]UnitStatus, error) { return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnits", 0).Store) } -// ListUnitsFiltered returns an array with units filtered by state. -// It takes a list of units' statuses to filter. -// Deprecated: use ListUnitsFilteredContext instead +// Deprecated: use ListUnitsFilteredContext instead. func (c *Conn) ListUnitsFiltered(states []string) ([]UnitStatus, error) { return c.ListUnitsFilteredContext(context.Background(), states) } -// ListUnitsFilteredContext same as ListUnitsFiltered with context +// ListUnitsFilteredContext returns an array with units filtered by state. +// It takes a list of units' statuses to filter. 
func (c *Conn) ListUnitsFilteredContext(ctx context.Context, states []string) ([]UnitStatus, error) { return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitsFiltered", 0, states).Store) } -// ListUnitsByPatterns returns an array with units. -// It takes a list of units' statuses and names to filter. -// Note that units may be known by multiple names at the same time, -// and hence there might be more unit names loaded than actual units behind them. -// Deprecated: use ListUnitsByPatternsContext instead +// Deprecated: use ListUnitsByPatternsContext instead. func (c *Conn) ListUnitsByPatterns(states []string, patterns []string) ([]UnitStatus, error) { return c.ListUnitsByPatternsContext(context.Background(), states, patterns) } -// ListUnitsByPatternsContext same as ListUnitsByPatterns with context +// ListUnitsByPatternsContext returns an array with units. +// It takes a list of units' statuses and names to filter. +// Note that units may be known by multiple names at the same time, +// and hence there might be more unit names loaded than actual units behind them. func (c *Conn) ListUnitsByPatternsContext(ctx context.Context, states []string, patterns []string) ([]UnitStatus, error) { return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitsByPatterns", 0, states, patterns).Store) } -// ListUnitsByNames returns an array with units. It takes a list of units' -// names and returns an UnitStatus array. Comparing to ListUnitsByPatterns -// method, this method returns statuses even for inactive or non-existing -// units. Input array should contain exact unit names, but not patterns. -// Note: Requires systemd v230 or higher -// Deprecated: use ListUnitsByNamesContext instead +// Deprecated: use ListUnitsByNamesContext instead. func (c *Conn) ListUnitsByNames(units []string) ([]UnitStatus, error) { return c.ListUnitsByNamesContext(context.Background(), units) } -// ListUnitsByNamesContext same as ListUnitsByNames with context +// ListUnitsByNamesContext returns an array with units. It takes a list of units' +// names and returns a UnitStatus array. Compared to the ListUnitsByPatternsContext +// method, this method returns statuses even for inactive or non-existing +// units. Input array should contain exact unit names, not patterns. +// +// Requires systemd v230 or higher. func (c *Conn) ListUnitsByNamesContext(ctx context.Context, units []string) ([]UnitStatus, error) { return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitsByNames", 0, units).Store) } @@ -513,37 +501,43 @@ func (c *Conn) listUnitFilesInternal(f storeFunc) ([]UnitFile, error) { return files, nil } -// ListUnitFiles returns an array of all available units on disk. -// Deprecated: use ListUnitFilesContext instead +// Deprecated: use ListUnitFilesContext instead. func (c *Conn) ListUnitFiles() ([]UnitFile, error) { return c.ListUnitFilesContext(context.Background()) } -// ListUnitFilesContext same as ListUnitFiles with context +// ListUnitFilesContext returns an array of all available units on disk. func (c *Conn) ListUnitFilesContext(ctx context.Context) ([]UnitFile, error) { return c.listUnitFilesInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitFiles", 0).Store) } -// ListUnitFilesByPatterns returns an array of all available units on disk matched the patterns.
-// Deprecated: use ListUnitFilesByPatternsContext instead +// Deprecated: use ListUnitFilesByPatternsContext instead. func (c *Conn) ListUnitFilesByPatterns(states []string, patterns []string) ([]UnitFile, error) { return c.ListUnitFilesByPatternsContext(context.Background(), states, patterns) } -// ListUnitFilesByPatternsContext same as ListUnitFilesByPatterns with context +// ListUnitFilesByPatternsContext returns an array of all available units on disk that match the patterns. func (c *Conn) ListUnitFilesByPatternsContext(ctx context.Context, states []string, patterns []string) ([]UnitFile, error) { return c.listUnitFilesInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitFilesByPatterns", 0, states, patterns).Store) } type LinkUnitFileChange EnableUnitFileChange -// LinkUnitFiles() links unit files (that are located outside of the +// Deprecated: use LinkUnitFilesContext instead. +func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) { + return c.LinkUnitFilesContext(context.Background(), files, runtime, force) +} + +// LinkUnitFilesContext links unit files (that are located outside of the // usual unit search paths) into the unit search path. // // It takes a list of absolute paths to unit files to link and two -// booleans. The first boolean controls whether the unit shall be +// booleans. +// +// The first boolean controls whether the unit shall be // enabled for runtime only (true, /run), or persistently (false, // /etc). +// // The second controls whether symlinks pointing to other units shall // be replaced if necessary. // @@ -551,12 +545,6 @@ type LinkUnitFileChange EnableUnitFileChange // structures with three strings: the type of the change (one of symlink // or unlink), the file name of the symlink and the destination of the // symlink. -// Deprecated: use LinkUnitFilesContext instead -func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) { - return c.LinkUnitFilesContext(context.Background(), files, runtime, force) -} - -// LinkUnitFilesContext same as LinkUnitFiles with context func (c *Conn) LinkUnitFilesContext(ctx context.Context, files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) { result := make([][]interface{}, 0) err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result) @@ -583,8 +571,13 @@ func (c *Conn) LinkUnitFilesContext(ctx context.Context, files []string, runtime return changes, nil } -// EnableUnitFiles() may be used to enable one or more units in the system (by -// creating symlinks to them in /etc or /run). +// Deprecated: use EnableUnitFilesContext instead. +func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) { + return c.EnableUnitFilesContext(context.Background(), files, runtime, force) +} + +// EnableUnitFilesContext may be used to enable one or more units in the system +// (by creating symlinks to them in /etc or /run). // // It takes a list of unit files to enable (either just file names or full // absolute paths if the unit files are residing outside the usual unit @@ -599,12 +592,6 @@ func (c *Conn) LinkUnitFilesContext(ctx context.Context, files []string, runtime // structures with three strings: the type of the change (one of symlink // or unlink), the file name of the symlink and the destination of the // symlink.
-// Deprecated: use EnableUnitFilesContext instead -func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) { - return c.EnableUnitFilesContext(context.Background(), files, runtime, force) -} - -// EnableUnitFilesContext same as EnableUnitFiles with context func (c *Conn) EnableUnitFilesContext(ctx context.Context, files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) { var carries_install_info bool @@ -639,8 +626,13 @@ type EnableUnitFileChange struct { Destination string // Destination of the symlink } -// DisableUnitFiles() may be used to disable one or more units in the system (by -// removing symlinks to them from /etc or /run). +// Deprecated: use DisableUnitFilesContext instead. +func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) { + return c.DisableUnitFilesContext(context.Background(), files, runtime) +} + +// DisableUnitFilesContext may be used to disable one or more units in the +// system (by removing symlinks to them from /etc or /run). // // It takes a list of unit files to disable (either just file names or full // absolute paths if the unit files are residing outside the usual unit @@ -651,12 +643,6 @@ type EnableUnitFileChange struct { // consists of structures with three strings: the type of the change (one of // symlink or unlink), the file name of the symlink and the destination of the // symlink. -// Deprecated: use DisableUnitFilesContext instead -func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) { - return c.DisableUnitFilesContext(context.Background(), files, runtime) -} - -// DisableUnitFilesContext same as DisableUnitFiles with context func (c *Conn) DisableUnitFilesContext(ctx context.Context, files []string, runtime bool) ([]DisableUnitFileChange, error) { result := make([][]interface{}, 0) err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result) @@ -689,21 +675,20 @@ type DisableUnitFileChange struct { Destination string // Destination of the symlink } -// MaskUnitFiles masks one or more units in the system -// -// It takes three arguments: -// * list of units to mask (either just file names or full -// absolute paths if the unit files are residing outside -// the usual unit search paths) -// * runtime to specify whether the unit was enabled for runtime -// only (true, /run/systemd/..), or persistently (false, /etc/systemd/..) -// * force flag -// Deprecated: use MaskUnitFilesContext instead +// Deprecated: use MaskUnitFilesContext instead. func (c *Conn) MaskUnitFiles(files []string, runtime bool, force bool) ([]MaskUnitFileChange, error) { return c.MaskUnitFilesContext(context.Background(), files, runtime, force) } -// MaskUnitFilesContext same as MaskUnitFiles with context +// MaskUnitFilesContext masks one or more units in the system. +// +// The files argument contains a list of units to mask (either just file names +// or full absolute paths if the unit files are residing outside the usual unit +// search paths). +// +// The runtime argument is used to specify whether the unit was enabled for +// runtime only (true, /run/systemd/..), or persistently (false, +// /etc/systemd/..). 
func (c *Conn) MaskUnitFilesContext(ctx context.Context, files []string, runtime bool, force bool) ([]MaskUnitFileChange, error) { result := make([][]interface{}, 0) err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.MaskUnitFiles", 0, files, runtime, force).Store(&result) @@ -736,20 +721,18 @@ type MaskUnitFileChange struct { Destination string // Destination of the symlink } -// UnmaskUnitFiles unmasks one or more units in the system -// -// It takes two arguments: -// * list of unit files to mask (either just file names or full -// absolute paths if the unit files are residing outside -// the usual unit search paths) -// * runtime to specify whether the unit was enabled for runtime -// only (true, /run/systemd/..), or persistently (false, /etc/systemd/..) -// Deprecated: use UnmaskUnitFilesContext instead +// Deprecated: use UnmaskUnitFilesContext instead. func (c *Conn) UnmaskUnitFiles(files []string, runtime bool) ([]UnmaskUnitFileChange, error) { return c.UnmaskUnitFilesContext(context.Background(), files, runtime) } -// UnmaskUnitFilesContext same as UnmaskUnitFiles with context +// UnmaskUnitFilesContext unmasks one or more units in the system. +// +// It takes the list of unit files to unmask (either just file names or full +// absolute paths if the unit files are residing outside the usual unit search +// paths), and a boolean runtime flag to specify whether the unit was enabled +// for runtime only (true, /run/systemd/..), or persistently (false, +// /etc/systemd/..). func (c *Conn) UnmaskUnitFilesContext(ctx context.Context, files []string, runtime bool) ([]UnmaskUnitFileChange, error) { result := make([][]interface{}, 0) err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.UnmaskUnitFiles", 0, files, runtime).Store(&result) @@ -782,14 +765,13 @@ type UnmaskUnitFileChange struct { Destination string // Destination of the symlink } -// Reload instructs systemd to scan for and reload unit files. This is -// equivalent to a 'systemctl daemon-reload'. -// Deprecated: use ReloadContext instead +// Deprecated: use ReloadContext instead. func (c *Conn) Reload() error { return c.ReloadContext(context.Background()) } -// ReloadContext same as Reload with context +// ReloadContext instructs systemd to scan for and reload unit files. This is +// equivalent to systemctl daemon-reload. func (c *Conn) ReloadContext(ctx context.Context) error { return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.Reload", 0).Store() } @@ -798,12 +780,12 @@ func unitPath(name string) dbus.ObjectPath { return dbus.ObjectPath("/org/freedesktop/systemd1/unit/" + PathBusEscape(name)) } -// unitName returns the unescaped base element of the supplied escaped path +// unitName returns the unescaped base element of the supplied escaped path. func unitName(dpath dbus.ObjectPath) string { return pathBusUnescape(path.Base(string(dpath))) } -// Currently queued job definition +// JobStatus holds a currently queued job definition. type JobStatus struct { Id uint32 // The numeric job id Unit string // The primary unit name for this job @@ -813,13 +795,12 @@ type JobStatus struct { UnitPath dbus.ObjectPath // The unit object path } -// ListJobs returns an array with all currently queued jobs -// Deprecated: use ListJobsContext instead +// Deprecated: use ListJobsContext instead.
func (c *Conn) ListJobs() ([]JobStatus, error) { return c.ListJobsContext(context.Background()) } -// ListJobsContext same as ListJobs with context +// ListJobsContext returns an array with all currently queued jobs. func (c *Conn) ListJobsContext(ctx context.Context) ([]JobStatus, error) { return c.listJobsInternal(ctx) } diff --git a/vendor/github.com/cyphar/filepath-securejoin/.travis.yml b/vendor/github.com/cyphar/filepath-securejoin/.travis.yml index 3938f3834..b94ff8cf9 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/.travis.yml +++ b/vendor/github.com/cyphar/filepath-securejoin/.travis.yml @@ -4,10 +4,12 @@ language: go go: - - 1.7.x - - 1.8.x + - 1.13.x + - 1.16.x - tip - +arch: + - AMD64 + - ppc64le os: - linux - osx diff --git a/vendor/github.com/cyphar/filepath-securejoin/README.md b/vendor/github.com/cyphar/filepath-securejoin/README.md index 49b2baa9f..3624617c8 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/README.md +++ b/vendor/github.com/cyphar/filepath-securejoin/README.md @@ -7,6 +7,19 @@ standard library][go#20126]. The purpose of this function is to be a "secure" alternative to `filepath.Join`, and in particular it provides certain guarantees that are not provided by `filepath.Join`. +> **NOTE**: This code is *only* safe if you are not at risk of other processes +> modifying path components after you've used `SecureJoin`. If it is possible +> for a malicious process to modify path components of the resolved path, then +> you will be vulnerable to some fairly trivial TOCTOU race conditions. [There +> are some Linux kernel patches I'm working on which might allow for a better +> solution.][lwn-obeneath] +> +> In addition, with a slightly modified API it might be possible to use +> `O_PATH` and verify that the opened path is actually the resolved one -- but +> I have not done that yet. I might add it in the future as a helper function +> to help users verify the path (we can't just return `/proc/self/fd/` +> because that doesn't always work transparently for all users). + This is the function prototype: ```go @@ -16,8 +29,8 @@ func SecureJoin(root, unsafePath string) (string, error) This library **guarantees** the following: * If no error is set, the resulting string **must** be a child path of - `SecureJoin` and will not contain any symlink path components (they will all - be expanded). + `root` and will not contain any symlink path components (they will all be + expanded). * When expanding symlinks, all symlink path components **must** be resolved relative to the provided root. In particular, this can be considered a @@ -25,7 +38,7 @@ This library **guarantees** the following: these symlinks will **not** be expanded lexically (`filepath.Clean` is not called on the input before processing). -* Non-existant path components are unaffected by `SecureJoin` (similar to +* Non-existent path components are unaffected by `SecureJoin` (similar to `filepath.EvalSymlinks`'s semantics). 
* The returned path will always be `filepath.Clean`ed and thus not contain any @@ -57,6 +70,7 @@ func SecureJoin(root, unsafePath string) (string, error) { } ``` +[lwn-obeneath]: https://lwn.net/Articles/767547/ [go#20126]: https://github.com/golang/go/issues/20126 ### License ### diff --git a/vendor/github.com/cyphar/filepath-securejoin/VERSION b/vendor/github.com/cyphar/filepath-securejoin/VERSION index ee1372d33..717903969 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/VERSION +++ b/vendor/github.com/cyphar/filepath-securejoin/VERSION @@ -1 +1 @@ -0.2.2 +0.2.3 diff --git a/vendor/github.com/cyphar/filepath-securejoin/go.mod b/vendor/github.com/cyphar/filepath-securejoin/go.mod new file mode 100644 index 000000000..0607c1fa0 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/go.mod @@ -0,0 +1,3 @@ +module github.com/cyphar/filepath-securejoin + +go 1.13 diff --git a/vendor/github.com/cyphar/filepath-securejoin/join.go b/vendor/github.com/cyphar/filepath-securejoin/join.go index c4ca3d713..7dd08dbbd 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/join.go +++ b/vendor/github.com/cyphar/filepath-securejoin/join.go @@ -12,39 +12,20 @@ package securejoin import ( "bytes" + "errors" "os" "path/filepath" "strings" "syscall" - - "github.com/pkg/errors" ) -// ErrSymlinkLoop is returned by SecureJoinVFS when too many symlinks have been -// evaluated in attempting to securely join the two given paths. -var ErrSymlinkLoop = errors.Wrap(syscall.ELOOP, "secure join") - // IsNotExist tells you if err is an error that implies that either the path // accessed does not exist (or path components don't exist). This is // effectively a more broad version of os.IsNotExist. func IsNotExist(err error) bool { - // If it's a bone-fide ENOENT just bail. - if os.IsNotExist(errors.Cause(err)) { - return true - } - // Check that it's not actually an ENOTDIR, which in some cases is a more // convoluted case of ENOENT (usually involving weird paths). - var errno error - switch err := errors.Cause(err).(type) { - case *os.PathError: - errno = err.Err - case *os.LinkError: - errno = err.Err - case *os.SyscallError: - errno = err.Err - } - return errno == syscall.ENOTDIR || errno == syscall.ENOENT + return errors.Is(err, os.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) || errors.Is(err, syscall.ENOENT) } // SecureJoinVFS joins the two given path components (similar to Join) except @@ -68,7 +49,7 @@ func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) { n := 0 for unsafePath != "" { if n > 255 { - return "", ErrSymlinkLoop + return "", &os.PathError{Op: "SecureJoin", Path: root + "/" + unsafePath, Err: syscall.ELOOP} } // Next path component, p. 
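The join.go changes above drop the last `pkg/errors` dependency and rebase `IsNotExist` on `errors.Is`, without changing the exported API whose prototype the README shows. A minimal usage sketch of that API (the root path here is illustrative):

```go
package main

import (
	"fmt"

	securejoin "github.com/cyphar/filepath-securejoin"
)

func main() {
	// An untrusted path is resolved strictly inside the root:
	// ".." components and symlinks cannot escape it.
	p, err := securejoin.SecureJoin("/var/lib/containers", "../../etc/passwd")
	if err != nil {
		fmt.Println("join failed:", err)
		return
	}
	fmt.Println(p) // /var/lib/containers/etc/passwd
}
```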
diff --git a/vendor/github.com/cyphar/filepath-securejoin/vendor.conf b/vendor/github.com/cyphar/filepath-securejoin/vendor.conf deleted file mode 100644 index 66bb574b9..000000000 --- a/vendor/github.com/cyphar/filepath-securejoin/vendor.conf +++ /dev/null @@ -1 +0,0 @@ -github.com/pkg/errors v0.8.0 diff --git a/vendor/github.com/docker/go-units/size.go b/vendor/github.com/docker/go-units/size.go index 85f6ab071..c245a8951 100644 --- a/vendor/github.com/docker/go-units/size.go +++ b/vendor/github.com/docker/go-units/size.go @@ -2,7 +2,6 @@ package units import ( "fmt" - "regexp" "strconv" "strings" ) @@ -26,16 +25,17 @@ const ( PiB = 1024 * TiB ) -type unitMap map[string]int64 +type unitMap map[byte]int64 var ( - decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB} - binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB} - sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[iI]?[bB]?$`) + decimalMap = unitMap{'k': KB, 'm': MB, 'g': GB, 't': TB, 'p': PB} + binaryMap = unitMap{'k': KiB, 'm': MiB, 'g': GiB, 't': TiB, 'p': PiB} ) -var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} -var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} +var ( + decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} + binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} +) func getSizeAndUnit(size float64, base float64, _map []string) (float64, string) { i := 0 @@ -89,20 +89,66 @@ func RAMInBytes(size string) (int64, error) { // Parses the human-readable size string into the amount it represents. func parseSize(sizeStr string, uMap unitMap) (int64, error) { - matches := sizeRegex.FindStringSubmatch(sizeStr) - if len(matches) != 4 { + // TODO: rewrite to use strings.Cut if there's a space + // once Go < 1.18 is deprecated. + sep := strings.LastIndexAny(sizeStr, "01234567890. ") + if sep == -1 { + // There should be at least a digit. return -1, fmt.Errorf("invalid size: '%s'", sizeStr) } + var num, sfx string + if sizeStr[sep] != ' ' { + num = sizeStr[:sep+1] + sfx = sizeStr[sep+1:] + } else { + // Omit the space separator. + num = sizeStr[:sep] + sfx = sizeStr[sep+1:] + } - size, err := strconv.ParseFloat(matches[1], 64) + size, err := strconv.ParseFloat(num, 64) if err != nil { return -1, err } + // Backward compatibility: reject negative sizes. + if size < 0 { + return -1, fmt.Errorf("invalid size: '%s'", sizeStr) + } + + if len(sfx) == 0 { + return int64(size), nil + } - unitPrefix := strings.ToLower(matches[3]) - if mul, ok := uMap[unitPrefix]; ok { + // Process the suffix. + + if len(sfx) > 3 { // Too long. + goto badSuffix + } + sfx = strings.ToLower(sfx) + // Trivial case: b suffix. + if sfx[0] == 'b' { + if len(sfx) > 1 { // no extra characters allowed after b. + goto badSuffix + } + return int64(size), nil + } + // A suffix from the map. + if mul, ok := uMap[sfx[0]]; ok { size *= float64(mul) + } else { + goto badSuffix + } + + // The suffix may have extra "b" or "ib" (e.g. KiB or MB). 
+ switch { + case len(sfx) == 2 && sfx[1] != 'b': + goto badSuffix + case len(sfx) == 3 && sfx[1:] != "ib": + goto badSuffix } return int64(size), nil + +badSuffix: + return -1, fmt.Errorf("invalid suffix: '%s'", sfx) } diff --git a/vendor/github.com/godbus/dbus/v5/README.markdown b/vendor/github.com/godbus/dbus/v5/README.md similarity index 81% rename from vendor/github.com/godbus/dbus/v5/README.markdown rename to vendor/github.com/godbus/dbus/v5/README.md index 1fb2eacaa..5c2412583 100644 --- a/vendor/github.com/godbus/dbus/v5/README.markdown +++ b/vendor/github.com/godbus/dbus/v5/README.md @@ -14,14 +14,12 @@ D-Bus message bus system. ### Installation -This packages requires Go 1.7. If you installed it and set up your GOPATH, just run: +This package requires Go 1.12 or later. It can be installed by running the command below: ``` -go get github.com/godbus/dbus +go get github.com/godbus/dbus/v5 ``` -If you want to use the subpackages, you can install them the same way. - ### Usage The complete package documentation and some simple examples are available at @@ -30,10 +28,12 @@ The complete package documentation and some simple examples are available at gives a short overview over the basic usage. #### Projects using godbus -- [notify](https://github.com/esiqveland/notify) provides desktop notifications over dbus into a library. +- [fyne](https://github.com/fyne-io/fyne) a cross platform GUI in Go inspired by Material Design. +- [fynedesk](https://github.com/fyne-io/fynedesk) a full desktop environment for Linux/Unix using Fyne. - [go-bluetooth](https://github.com/muka/go-bluetooth) provides a bluetooth client over bluez dbus API. -- [playerbm](https://github.com/altdesktop/playerbm) a bookmark utility for media players. - [iwd](https://github.com/shibumi/iwd) go bindings for the internet wireless daemon "iwd". +- [notify](https://github.com/esiqveland/notify) provides desktop notifications over dbus into a library. +- [playerbm](https://github.com/altdesktop/playerbm) a bookmark utility for media players. Please note that the API is considered unstable for now and may change without further notice. diff --git a/vendor/github.com/godbus/dbus/v5/auth.go b/vendor/github.com/godbus/dbus/v5/auth.go index 283487a0e..a59b4c0eb 100644 --- a/vendor/github.com/godbus/dbus/v5/auth.go +++ b/vendor/github.com/godbus/dbus/v5/auth.go @@ -53,7 +53,7 @@ type Auth interface { // bus. Auth must not be called on shared connections.
func (conn *Conn) Auth(methods []Auth) error { if methods == nil { - uid := strconv.Itoa(os.Getuid()) + uid := strconv.Itoa(os.Geteuid()) methods = []Auth{AuthExternal(uid), AuthCookieSha1(uid, getHomeDir())} } in := bufio.NewReader(conn.transport) @@ -75,9 +75,9 @@ func (conn *Conn) Auth(methods []Auth) error { s = s[1:] for _, v := range s { for _, m := range methods { - if name, data, status := m.FirstData(); bytes.Equal(v, name) { + if name, _, status := m.FirstData(); bytes.Equal(v, name) { var ok bool - err = authWriteLine(conn.transport, []byte("AUTH"), v, data) + err = authWriteLine(conn.transport, []byte("AUTH"), v) if err != nil { return err } @@ -194,11 +194,14 @@ func (conn *Conn) tryAuth(m Auth, state authState, in *bufio.Reader) (error, boo } conn.uuid = string(s[1]) return nil, true + case state == waitingForOk && string(s[0]) == "DATA": + err = authWriteLine(conn.transport, []byte("DATA")) + if err != nil { + return err, false + } case state == waitingForOk && string(s[0]) == "REJECTED": return nil, false - case state == waitingForOk && (string(s[0]) == "DATA" || - string(s[0]) == "ERROR"): - + case state == waitingForOk && string(s[0]) == "ERROR": err = authWriteLine(conn.transport, []byte("CANCEL")) if err != nil { return err, false diff --git a/vendor/github.com/godbus/dbus/v5/conn.go b/vendor/github.com/godbus/dbus/v5/conn.go index 29fe018ad..76fc5cde3 100644 --- a/vendor/github.com/godbus/dbus/v5/conn.go +++ b/vendor/github.com/godbus/dbus/v5/conn.go @@ -73,7 +73,7 @@ func SessionBus() (conn *Conn, err error) { return } -func getSessionBusAddress() (string, error) { +func getSessionBusAddress(autolaunch bool) (string, error) { if address := os.Getenv("DBUS_SESSION_BUS_ADDRESS"); address != "" && address != "autolaunch:" { return address, nil @@ -81,12 +81,26 @@ func getSessionBusAddress() (string, error) { os.Setenv("DBUS_SESSION_BUS_ADDRESS", address) return address, nil } + if !autolaunch { + return "", errors.New("dbus: couldn't determine address of session bus") + } return getSessionBusPlatformAddress() } // SessionBusPrivate returns a new private connection to the session bus. func SessionBusPrivate(opts ...ConnOption) (*Conn, error) { - address, err := getSessionBusAddress() + address, err := getSessionBusAddress(true) if err != nil { return nil, err } + + return Dial(address, opts...) +} + +// SessionBusPrivateNoAutoStartup returns a new private connection to the session bus. If +// the session bus is not already open, it will not be launched. +func SessionBusPrivateNoAutoStartup(opts ...ConnOption) (*Conn, error) { + address, err := getSessionBusAddress(false) if err != nil { return nil, err } @@ -121,7 +135,7 @@ func SystemBus() (conn *Conn, err error) { // ConnectSessionBus connects to the session bus. func ConnectSessionBus(opts ...ConnOption) (*Conn, error) { - address, err := getSessionBusAddress() + address, err := getSessionBusAddress(true) if err != nil { return nil, err } @@ -180,7 +194,7 @@ func Dial(address string, opts ...ConnOption) (*Conn, error) { // // Deprecated: use Dial with options instead. func DialHandler(address string, handler Handler, signalHandler SignalHandler) (*Conn, error) { - return Dial(address, WithSignalHandler(signalHandler)) + return Dial(address, WithHandler(handler), WithSignalHandler(signalHandler)) } // ConnOption is a connection option.
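The conn.go hunk above threads an `autolaunch` flag through `getSessionBusAddress` and adds `SessionBusPrivateNoAutoStartup`, which fails fast instead of spawning a bus via the platform autolaunch mechanism. A hedged usage sketch; the `Auth`/`Hello` handshake is the usual requirement for private godbus connections:

```go
package main

import (
	"fmt"

	"github.com/godbus/dbus/v5"
)

func main() {
	// Connect only if a session bus is already running.
	conn, err := dbus.SessionBusPrivateNoAutoStartup()
	if err != nil {
		fmt.Println("no session bus:", err)
		return
	}
	defer conn.Close()

	// Private connections start unauthenticated; passing nil selects
	// the default methods set up in Auth above.
	if err := conn.Auth(nil); err != nil {
		fmt.Println("auth failed:", err)
		return
	}
	if err := conn.Hello(); err != nil {
		fmt.Println("hello failed:", err)
		return
	}
	fmt.Println("connected as", conn.Names()[0])
}
```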
@@ -478,14 +492,24 @@ func (conn *Conn) sendMessageAndIfClosed(msg *Message, ifClosed func()) { conn.outInt(msg) } err := conn.outHandler.sendAndIfClosed(msg, ifClosed) - conn.calls.handleSendError(msg, err) if err != nil { - conn.serialGen.RetireSerial(msg.serial) + conn.handleSendError(msg, err) } else if msg.Type != TypeMethodCall { conn.serialGen.RetireSerial(msg.serial) } } +func (conn *Conn) handleSendError(msg *Message, err error) { + if msg.Type == TypeMethodCall { + conn.calls.handleSendError(msg, err) + } else if msg.Type == TypeMethodReply { + if _, ok := err.(FormatError); ok { + conn.sendError(err, msg.Headers[FieldDestination].value.(string), msg.Headers[FieldReplySerial].value.(uint32)) + } + } + conn.serialGen.RetireSerial(msg.serial) +} + // Send sends the given message to the message bus. You usually don't need to // use this; use the higher-level equivalents (Call / Go, Emit and Export) // instead. If msg is a method call and NoReplyExpected is not set, a non-nil diff --git a/vendor/github.com/godbus/dbus/v5/decoder.go b/vendor/github.com/godbus/dbus/v5/decoder.go index ede91575b..89bfed9d1 100644 --- a/vendor/github.com/godbus/dbus/v5/decoder.go +++ b/vendor/github.com/godbus/dbus/v5/decoder.go @@ -10,14 +10,16 @@ type decoder struct { in io.Reader order binary.ByteOrder pos int + fds []int } // newDecoder returns a new decoder that reads values from in. The input is // expected to be in the given byte order. -func newDecoder(in io.Reader, order binary.ByteOrder) *decoder { +func newDecoder(in io.Reader, order binary.ByteOrder, fds []int) *decoder { dec := new(decoder) dec.in = in dec.order = order + dec.fds = fds return dec } @@ -53,7 +55,7 @@ func (dec *decoder) Decode(sig Signature) (vs []interface{}, err error) { vs = make([]interface{}, 0) s := sig.str for s != "" { - err, rem := validSingle(s, 0) + err, rem := validSingle(s, &depthCounter{}) if err != nil { return nil, err } @@ -150,7 +152,7 @@ func (dec *decoder) decode(s string, depth int) interface{} { if len(sig.str) == 0 { panic(FormatError("variant signature is empty")) } - err, rem := validSingle(sig.str, 0) + err, rem := validSingle(sig.str, &depthCounter{}) if err != nil { panic(err) } @@ -161,7 +163,11 @@ func (dec *decoder) decode(s string, depth int) interface{} { variant.value = dec.decode(sig.str, depth+1) return variant case 'h': - return UnixFDIndex(dec.decode("u", depth).(uint32)) + idx := dec.decode("u", depth).(uint32) + if int(idx) < len(dec.fds) { + return UnixFD(dec.fds[idx]) + } + return UnixFDIndex(idx) case 'a': if len(s) > 1 && s[1] == '{' { ksig := s[2:3] @@ -219,7 +225,7 @@ func (dec *decoder) decode(s string, depth int) interface{} { v := make([]interface{}, 0) s = s[1 : len(s)-1] for s != "" { - err, rem := validSingle(s, 0) + err, rem := validSingle(s, &depthCounter{}) if err != nil { panic(err) } diff --git a/vendor/github.com/godbus/dbus/v5/encoder.go b/vendor/github.com/godbus/dbus/v5/encoder.go index adfbb75c5..015b26cd5 100644 --- a/vendor/github.com/godbus/dbus/v5/encoder.go +++ b/vendor/github.com/godbus/dbus/v5/encoder.go @@ -5,28 +5,33 @@ import ( "encoding/binary" "io" "reflect" + "strings" + "unicode/utf8" ) // An encoder encodes values to the D-Bus wire format. type encoder struct { out io.Writer + fds []int order binary.ByteOrder pos int } // NewEncoder returns a new encoder that writes to out in the given byte order. 
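With the decoder now carrying the out-of-band descriptor table (the fds parameter above), an 'h'-typed (UNIX_FD) value can decode to a live dbus.UnixFD rather than a bare dbus.UnixFDIndex whenever the transport delivered SCM_RIGHTS ancillary data. A sketch of what that enables on the receiving side; the server type and method here are hypothetical, not part of the library:

```go
package fdsink

import (
	"io"
	"os"

	"github.com/godbus/dbus/v5"
)

type server struct{}

// TakeLog is a hypothetical exported method with D-Bus signature "h".
// With the decoder change above, fd arrives as a real descriptor already
// installed in this process, not just an index into a side table.
func (s server) TakeLog(fd dbus.UnixFD) *dbus.Error {
	f := os.NewFile(uintptr(fd), "client-log") // name is illustrative
	defer f.Close()
	_, _ = io.Copy(io.Discard, f) // drain the stream; error handling elided
	return nil
}
```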
-func newEncoder(out io.Writer, order binary.ByteOrder) *encoder { - return newEncoderAtOffset(out, 0, order) +func newEncoder(out io.Writer, order binary.ByteOrder, fds []int) *encoder { + enc := newEncoderAtOffset(out, 0, order, fds) + return enc } // newEncoderAtOffset returns a new encoder that writes to out in the given // byte order. Specify the offset to initialize pos for proper alignment // computation. -func newEncoderAtOffset(out io.Writer, offset int, order binary.ByteOrder) *encoder { +func newEncoderAtOffset(out io.Writer, offset int, order binary.ByteOrder, fds []int) *encoder { enc := new(encoder) enc.out = out enc.order = order enc.pos = offset + enc.fds = fds return enc } @@ -75,6 +80,9 @@ func (enc *encoder) Encode(vs ...interface{}) (err error) { // encode encodes the given value to the writer and panics on error. depth holds // the depth of the container nesting. func (enc *encoder) encode(v reflect.Value, depth int) { + if depth > 64 { + panic(FormatError("input exceeds depth limitation")) + } enc.align(alignment(v.Type())) switch v.Kind() { case reflect.Uint8: @@ -97,7 +105,14 @@ func (enc *encoder) encode(v reflect.Value, depth int) { enc.binwrite(uint16(v.Uint())) enc.pos += 2 case reflect.Int, reflect.Int32: - enc.binwrite(int32(v.Int())) + if v.Type() == unixFDType { + fd := v.Int() + idx := len(enc.fds) + enc.fds = append(enc.fds, int(fd)) + enc.binwrite(uint32(idx)) + } else { + enc.binwrite(int32(v.Int())) + } enc.pos += 4 case reflect.Uint, reflect.Uint32: enc.binwrite(uint32(v.Uint())) @@ -112,9 +127,21 @@ func (enc *encoder) encode(v reflect.Value, depth int) { enc.binwrite(v.Float()) enc.pos += 8 case reflect.String: - enc.encode(reflect.ValueOf(uint32(len(v.String()))), depth) + str := v.String() + if !utf8.ValidString(str) { + panic(FormatError("input has a not-utf8 char in string")) + } + if strings.IndexByte(str, byte(0)) != -1 { + panic(FormatError("input has a null char('\\000') in string")) + } + if v.Type() == objectPathType { + if !ObjectPath(str).IsValid() { + panic(FormatError("invalid object path")) + } + } + enc.encode(reflect.ValueOf(uint32(len(str))), depth) b := make([]byte, v.Len()+1) - copy(b, v.String()) + copy(b, str) b[len(b)-1] = 0 n, err := enc.out.Write(b) if err != nil { @@ -124,20 +151,23 @@ func (enc *encoder) encode(v reflect.Value, depth int) { case reflect.Ptr: enc.encode(v.Elem(), depth) case reflect.Slice, reflect.Array: - if depth >= 64 { - panic(FormatError("input exceeds container depth limit")) - } // Lookahead offset: 4 bytes for uint32 length (with alignment), // plus alignment for elements. 
n := enc.padding(0, 4) + 4 offset := enc.pos + n + enc.padding(n, alignment(v.Type().Elem())) var buf bytes.Buffer - bufenc := newEncoderAtOffset(&buf, offset, enc.order) + bufenc := newEncoderAtOffset(&buf, offset, enc.order, enc.fds) for i := 0; i < v.Len(); i++ { bufenc.encode(v.Index(i), depth+1) } + + if buf.Len() > 1<<26 { + panic(FormatError("input exceeds array size limitation")) + } + + enc.fds = bufenc.fds enc.encode(reflect.ValueOf(uint32(buf.Len())), depth) length := buf.Len() enc.align(alignment(v.Type().Elem())) @@ -146,13 +176,10 @@ func (enc *encoder) encode(v reflect.Value, depth int) { } enc.pos += length case reflect.Struct: - if depth >= 64 && v.Type() != signatureType { - panic(FormatError("input exceeds container depth limit")) - } switch t := v.Type(); t { case signatureType: str := v.Field(0) - enc.encode(reflect.ValueOf(byte(str.Len())), depth+1) + enc.encode(reflect.ValueOf(byte(str.Len())), depth) b := make([]byte, str.Len()+1) copy(b, str.String()) b[len(b)-1] = 0 @@ -176,9 +203,6 @@ func (enc *encoder) encode(v reflect.Value, depth int) { case reflect.Map: // Maps are arrays of structures, so they actually increase the depth by // 2. - if depth >= 63 { - panic(FormatError("input exceeds container depth limit")) - } if !isKeyType(v.Type().Key()) { panic(InvalidTypeError{v.Type()}) } @@ -189,12 +213,13 @@ func (enc *encoder) encode(v reflect.Value, depth int) { offset := enc.pos + n + enc.padding(n, 8) var buf bytes.Buffer - bufenc := newEncoderAtOffset(&buf, offset, enc.order) + bufenc := newEncoderAtOffset(&buf, offset, enc.order, enc.fds) for _, k := range keys { bufenc.align(8) bufenc.encode(k, depth+2) bufenc.encode(v.MapIndex(k), depth+2) } + enc.fds = bufenc.fds enc.encode(reflect.ValueOf(uint32(buf.Len())), depth) length := buf.Len() enc.align(8) diff --git a/vendor/github.com/godbus/dbus/v5/export.go b/vendor/github.com/godbus/dbus/v5/export.go index 2447b51d4..522334715 100644 --- a/vendor/github.com/godbus/dbus/v5/export.go +++ b/vendor/github.com/godbus/dbus/v5/export.go @@ -26,6 +26,27 @@ var ( } ) +func MakeNoObjectError(path ObjectPath) Error { + return Error{ + "org.freedesktop.DBus.Error.NoSuchObject", + []interface{}{fmt.Sprintf("No such object '%s'", string(path))}, + } +} + +func MakeUnknownMethodError(methodName string) Error { + return Error{ + "org.freedesktop.DBus.Error.UnknownMethod", + []interface{}{fmt.Sprintf("Unknown / invalid method '%s'", methodName)}, + } +} + +func MakeUnknownInterfaceError(ifaceName string) Error { + return Error{ + "org.freedesktop.DBus.Error.UnknownInterface", + []interface{}{fmt.Sprintf("Object does not implement the interface '%s'", ifaceName)}, + } +} + func MakeFailedError(err error) *Error { return &Error{ "org.freedesktop.DBus.Error.Failed", @@ -128,6 +149,11 @@ func (conn *Conn) handleCall(msg *Message) { ifaceName, _ := msg.Headers[FieldInterface].value.(string) sender, hasSender := msg.Headers[FieldSender].value.(string) serial := msg.serial + + if len(name) == 0 { + conn.sendError(ErrMsgUnknownMethod, sender, serial) + } + if ifaceName == "org.freedesktop.DBus.Peer" { switch name { case "Ping": @@ -135,29 +161,26 @@ func (conn *Conn) handleCall(msg *Message) { case "GetMachineId": conn.sendReply(sender, serial, conn.uuid) default: - conn.sendError(ErrMsgUnknownMethod, sender, serial) + conn.sendError(MakeUnknownMethodError(name), sender, serial) } return } - if len(name) == 0 { - conn.sendError(ErrMsgUnknownMethod, sender, serial) - } object, ok := conn.handler.LookupObject(path) if !ok { - 
conn.sendError(ErrMsgNoObject, sender, serial) + conn.sendError(MakeNoObjectError(path), sender, serial) return } iface, exists := object.LookupInterface(ifaceName) if !exists { - conn.sendError(ErrMsgUnknownInterface, sender, serial) + conn.sendError(MakeUnknownInterfaceError(ifaceName), sender, serial) return } m, exists := iface.LookupMethod(name) if !exists { - conn.sendError(ErrMsgUnknownMethod, sender, serial) + conn.sendError(MakeUnknownMethodError(name), sender, serial) return } args, err := conn.decodeArguments(m, sender, msg) diff --git a/vendor/github.com/godbus/dbus/v5/message.go b/vendor/github.com/godbus/dbus/v5/message.go index 6a925367e..16693eb30 100644 --- a/vendor/github.com/godbus/dbus/v5/message.go +++ b/vendor/github.com/godbus/dbus/v5/message.go @@ -118,11 +118,7 @@ type header struct { Variant } -// DecodeMessage tries to decode a single message in the D-Bus wire format -// from the given reader. The byte order is figured out from the first byte. -// The possibly returned error can be an error of the underlying reader, an -// InvalidMessageError or a FormatError. -func DecodeMessage(rd io.Reader) (msg *Message, err error) { +func DecodeMessageWithFDs(rd io.Reader, fds []int) (msg *Message, err error) { var order binary.ByteOrder var hlength, length uint32 var typ, flags, proto byte @@ -142,7 +138,7 @@ func DecodeMessage(rd io.Reader) (msg *Message, err error) { return nil, InvalidMessageError("invalid byte order") } - dec := newDecoder(rd, order) + dec := newDecoder(rd, order, fds) dec.pos = 1 msg = new(Message) @@ -166,7 +162,7 @@ func DecodeMessage(rd io.Reader) (msg *Message, err error) { if hlength+length+16 > 1<<27 { return nil, InvalidMessageError("message is too long") } - dec = newDecoder(io.MultiReader(bytes.NewBuffer(b), rd), order) + dec = newDecoder(io.MultiReader(bytes.NewBuffer(b), rd), order, fds) dec.pos = 12 vs, err = dec.Decode(Signature{"a(yv)"}) if err != nil { @@ -196,7 +192,7 @@ func DecodeMessage(rd io.Reader) (msg *Message, err error) { sig, _ := msg.Headers[FieldSignature].value.(Signature) if sig.str != "" { buf := bytes.NewBuffer(body) - dec = newDecoder(buf, order) + dec = newDecoder(buf, order, fds) vs, err := dec.Decode(sig) if err != nil { return nil, err @@ -207,12 +203,32 @@ func DecodeMessage(rd io.Reader) (msg *Message, err error) { return } -// EncodeTo encodes and sends a message to the given writer. The byte order must -// be either binary.LittleEndian or binary.BigEndian. If the message is not -// valid or an error occurs when writing, an error is returned. -func (msg *Message) EncodeTo(out io.Writer, order binary.ByteOrder) error { +// DecodeMessage tries to decode a single message in the D-Bus wire format +// from the given reader. The byte order is figured out from the first byte. +// The possibly returned error can be an error of the underlying reader, an +// InvalidMessageError or a FormatError. +func DecodeMessage(rd io.Reader) (msg *Message, err error) { + return DecodeMessageWithFDs(rd, make([]int, 0)); +} + +type nullwriter struct{} + +func (nullwriter) Write(p []byte) (cnt int, err error) { + return len(p), nil +} + +func (msg *Message) CountFds() (int, error) { + if len(msg.Body) == 0 { + return 0, nil + } + enc := newEncoder(nullwriter{}, nativeEndian, make([]int, 0)) + err := enc.Encode(msg.Body...) 
+ return len(enc.fds), err +} + +func (msg *Message) EncodeToWithFDs(out io.Writer, order binary.ByteOrder) (fds []int, err error) { if err := msg.IsValid(); err != nil { - return err + return make([]int, 0), err } var vs [7]interface{} switch order { @@ -221,12 +237,16 @@ func (msg *Message) EncodeTo(out io.Writer, order binary.ByteOrder) error { case binary.BigEndian: vs[0] = byte('B') default: - return errors.New("dbus: invalid byte order") + return make([]int, 0), errors.New("dbus: invalid byte order") } body := new(bytes.Buffer) - enc := newEncoder(body, order) + fds = make([]int, 0) + enc := newEncoder(body, order, fds) if len(msg.Body) != 0 { - enc.Encode(msg.Body...) + err = enc.Encode(msg.Body...) + if err != nil { + return + } } vs[1] = msg.Type vs[2] = msg.Flags @@ -239,17 +259,28 @@ func (msg *Message) EncodeTo(out io.Writer, order binary.ByteOrder) error { } vs[6] = headers var buf bytes.Buffer - enc = newEncoder(&buf, order) - enc.Encode(vs[:]...) + enc = newEncoder(&buf, order, enc.fds) + err = enc.Encode(vs[:]...) + if err != nil { + return + } enc.align(8) body.WriteTo(&buf) if buf.Len() > 1<<27 { - return InvalidMessageError("message is too long") + return make([]int, 0), InvalidMessageError("message is too long") } if _, err := buf.WriteTo(out); err != nil { - return err + return make([]int, 0), err } - return nil + return enc.fds, nil +} + +// EncodeTo encodes and sends a message to the given writer. The byte order must +// be either binary.LittleEndian or binary.BigEndian. If the message is not +// valid or an error occurs when writing, an error is returned. +func (msg *Message) EncodeTo(out io.Writer, order binary.ByteOrder) (err error) { + _, err = msg.EncodeToWithFDs(out, order) + return err } // IsValid checks whether msg is a valid message and returns an diff --git a/vendor/github.com/godbus/dbus/v5/sig.go b/vendor/github.com/godbus/dbus/v5/sig.go index 2d326cebc..41a039812 100644 --- a/vendor/github.com/godbus/dbus/v5/sig.go +++ b/vendor/github.com/godbus/dbus/v5/sig.go @@ -34,7 +34,7 @@ type Signature struct { func SignatureOf(vs ...interface{}) Signature { var s string for _, v := range vs { - s += getSignature(reflect.TypeOf(v)) + s += getSignature(reflect.TypeOf(v), &depthCounter{}) } return Signature{s} } @@ -42,11 +42,19 @@ func SignatureOf(vs ...interface{}) Signature { // SignatureOfType returns the signature of the given type. It panics if the // type is not representable in D-Bus. func SignatureOfType(t reflect.Type) Signature { - return Signature{getSignature(t)} + return Signature{getSignature(t, &depthCounter{})} } // getSignature returns the signature of the given type and panics on unknown types. 
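SignatureOf and SignatureOfType now thread a depthCounter through getSignature so that pathological Go types cannot build unbounded signatures. The type-to-signature mapping itself is unchanged; for reference, a few representative inputs with the signatures the D-Bus type system assigns them (expected outputs shown as comments, worth verifying against your godbus version):

```go
package main

import (
	"fmt"

	"github.com/godbus/dbus/v5"
)

func main() {
	fmt.Println(dbus.SignatureOf(int32(0)))                  // i
	fmt.Println(dbus.SignatureOf([]string{}))                // as
	fmt.Println(dbus.SignatureOf(map[string]dbus.Variant{})) // a{sv}
	fmt.Println(dbus.SignatureOf(struct {
		ID   uint32
		Name string
	}{})) // (us)
}
```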
-func getSignature(t reflect.Type) string { +func getSignature(t reflect.Type, depth *depthCounter) (sig string) { + if !depth.Valid() { + panic("container nesting too deep") + } + defer func() { + if len(sig) > 255 { + panic("signature exceeds the length limitation") + } + }() // handle simple types first switch t.Kind() { case reflect.Uint8: @@ -74,7 +82,7 @@ func getSignature(t reflect.Type) string { case reflect.Float64: return "d" case reflect.Ptr: - return getSignature(t.Elem()) + return getSignature(t.Elem(), depth) case reflect.String: if t == objectPathType { return "o" @@ -90,17 +98,20 @@ func getSignature(t reflect.Type) string { for i := 0; i < t.NumField(); i++ { field := t.Field(i) if field.PkgPath == "" && field.Tag.Get("dbus") != "-" { - s += getSignature(t.Field(i).Type) + s += getSignature(t.Field(i).Type, depth.EnterStruct()) } } + if len(s) == 0 { + panic("empty struct") + } return "(" + s + ")" case reflect.Array, reflect.Slice: - return "a" + getSignature(t.Elem()) + return "a" + getSignature(t.Elem(), depth.EnterArray()) case reflect.Map: if !isKeyType(t.Key()) { panic(InvalidTypeError{t}) } - return "a{" + getSignature(t.Key()) + getSignature(t.Elem()) + "}" + return "a{" + getSignature(t.Key(), depth.EnterArray().EnterDictEntry()) + getSignature(t.Elem(), depth.EnterArray().EnterDictEntry()) + "}" case reflect.Interface: return "v" } @@ -118,7 +129,7 @@ func ParseSignature(s string) (sig Signature, err error) { } sig.str = s for err == nil && len(s) != 0 { - err, s = validSingle(s, 0) + err, s = validSingle(s, &depthCounter{}) } if err != nil { sig = Signature{""} @@ -144,7 +155,7 @@ func (s Signature) Empty() bool { // Single returns whether the signature represents a single, complete type. func (s Signature) Single() bool { - err, r := validSingle(s.str, 0) + err, r := validSingle(s.str, &depthCounter{}) return err != nil && r == "" } @@ -164,15 +175,38 @@ func (e SignatureError) Error() string { return fmt.Sprintf("dbus: invalid signature: %q (%s)", e.Sig, e.Reason) } +type depthCounter struct { + arrayDepth, structDepth, dictEntryDepth int +} + +func (cnt *depthCounter) Valid() bool { + return cnt.arrayDepth <= 32 && cnt.structDepth <= 32 && cnt.dictEntryDepth <= 32 +} + +func (cnt depthCounter) EnterArray() *depthCounter { + cnt.arrayDepth++ + return &cnt +} + +func (cnt depthCounter) EnterStruct() *depthCounter { + cnt.structDepth++ + return &cnt +} + +func (cnt depthCounter) EnterDictEntry() *depthCounter { + cnt.dictEntryDepth++ + return &cnt +} + // Try to read a single type from this string. If it was successful, err is nil // and rem is the remaining unparsed part. Otherwise, err is a non-nil // SignatureError and rem is "". depth is the current recursion depth which may // not be greater than 64 and should be given as 0 on the first call. 
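A detail worth calling out in the depthCounter type above: Valid is declared on a pointer receiver, but EnterArray, EnterStruct and EnterDictEntry take value receivers and return the address of an incremented copy. Each recursive call therefore gets its own counter, and sibling branches of a signature never see each other's depth. (The doc comment above still describes the old integer depth parameter; after this change the counter caps arrays, structs and dict entries at 32 levels each rather than a flat 64.) A standalone illustration of the receiver idiom, with names that are illustrative rather than from the library:

```go
package main

import "fmt"

type counter struct{ depth int }

// enter increments a copy of the receiver and returns the copy's
// address, so callers holding the original counter are unaffected.
func (c counter) enter() *counter {
	c.depth++
	return &c
}

func main() {
	root := &counter{}
	a := root.enter()
	b := a.enter()
	fmt.Println(root.depth, a.depth, b.depth) // 0 1 2
}
```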
-func validSingle(s string, depth int) (err error, rem string) { +func validSingle(s string, depth *depthCounter) (err error, rem string) { if s == "" { return SignatureError{Sig: s, Reason: "empty signature"}, "" } - if depth > 64 { + if !depth.Valid() { return SignatureError{Sig: s, Reason: "container nesting too deep"}, "" } switch s[0] { @@ -187,10 +221,10 @@ func validSingle(s string, depth int) (err error, rem string) { i++ rem = s[i+1:] s = s[2:i] - if err, _ = validSingle(s[:1], depth+1); err != nil { + if err, _ = validSingle(s[:1], depth.EnterArray().EnterDictEntry()); err != nil { return err, "" } - err, nr := validSingle(s[1:], depth+1) + err, nr := validSingle(s[1:], depth.EnterArray().EnterDictEntry()) if err != nil { return err, "" } @@ -199,7 +233,7 @@ func validSingle(s string, depth int) (err error, rem string) { } return nil, rem } - return validSingle(s[1:], depth+1) + return validSingle(s[1:], depth.EnterArray()) case '(': i := findMatching(s, '(', ')') if i == -1 { @@ -208,7 +242,7 @@ func validSingle(s string, depth int) (err error, rem string) { rem = s[i+1:] s = s[1:i] for err == nil && s != "" { - err, s = validSingle(s, depth+1) + err, s = validSingle(s, depth.EnterStruct()) } if err != nil { rem = "" @@ -236,7 +270,7 @@ func findMatching(s string, left, right rune) int { // typeFor returns the type of the given signature. It ignores any left over // characters and panics if s doesn't start with a valid type signature. func typeFor(s string) (t reflect.Type) { - err, _ := validSingle(s, 0) + err, _ := validSingle(s, &depthCounter{}) if err != nil { panic(err) } diff --git a/vendor/github.com/godbus/dbus/v5/transport_generic.go b/vendor/github.com/godbus/dbus/v5/transport_generic.go index 718a1ff02..a08e2813c 100644 --- a/vendor/github.com/godbus/dbus/v5/transport_generic.go +++ b/vendor/github.com/godbus/dbus/v5/transport_generic.go @@ -41,10 +41,12 @@ func (t genericTransport) ReadMessage() (*Message, error) { } func (t genericTransport) SendMessage(msg *Message) error { - for _, v := range msg.Body { - if _, ok := v.(UnixFD); ok { - return errors.New("dbus: unix fd passing not enabled") - } + fds, err := msg.CountFds() + if err != nil { + return err + } + if fds != 0 { + return errors.New("dbus: unix fd passing not enabled") } return msg.EncodeTo(t, nativeEndian) } diff --git a/vendor/github.com/godbus/dbus/v5/transport_unix.go b/vendor/github.com/godbus/dbus/v5/transport_unix.go index c7cd02f97..2212e7fa7 100644 --- a/vendor/github.com/godbus/dbus/v5/transport_unix.go +++ b/vendor/github.com/godbus/dbus/v5/transport_unix.go @@ -113,7 +113,7 @@ func (t *unixTransport) ReadMessage() (*Message, error) { if _, err := io.ReadFull(t.rdr, headerdata[4:]); err != nil { return nil, err } - dec := newDecoder(bytes.NewBuffer(headerdata), order) + dec := newDecoder(bytes.NewBuffer(headerdata), order, make([]int, 0)) dec.pos = 12 vs, err := dec.Decode(Signature{"a(yv)"}) if err != nil { @@ -147,7 +147,7 @@ func (t *unixTransport) ReadMessage() (*Message, error) { if err != nil { return nil, err } - msg, err := DecodeMessage(bytes.NewBuffer(all)) + msg, err := DecodeMessageWithFDs(bytes.NewBuffer(all), fds) if err != nil { return nil, err } @@ -179,21 +179,21 @@ func (t *unixTransport) ReadMessage() (*Message, error) { } func (t *unixTransport) SendMessage(msg *Message) error { - fds := make([]int, 0) - for i, v := range msg.Body { - if fd, ok := v.(UnixFD); ok { - msg.Body[i] = UnixFDIndex(len(fds)) - fds = append(fds, int(fd)) - } + fdcnt, err := msg.CountFds() + if err 
!= nil { + return err } - if len(fds) != 0 { + if fdcnt != 0 { if !t.hasUnixFDs { return errors.New("dbus: unix fd passing not enabled") } - msg.Headers[FieldUnixFDs] = MakeVariant(uint32(len(fds))) - oob := syscall.UnixRights(fds...) + msg.Headers[FieldUnixFDs] = MakeVariant(uint32(fdcnt)) buf := new(bytes.Buffer) - msg.EncodeTo(buf, nativeEndian) + fds, err := msg.EncodeToWithFDs(buf, nativeEndian) + if err != nil { + return err + } + oob := syscall.UnixRights(fds...) n, oobn, err := t.UnixConn.WriteMsgUnix(buf.Bytes(), oob, nil) if err != nil { return err diff --git a/vendor/github.com/godbus/dbus/v5/transport_unixcred_netbsd.go b/vendor/github.com/godbus/dbus/v5/transport_unixcred_netbsd.go new file mode 100644 index 000000000..af7bafdf9 --- /dev/null +++ b/vendor/github.com/godbus/dbus/v5/transport_unixcred_netbsd.go @@ -0,0 +1,14 @@ +package dbus + +import "io" + +func (t *unixTransport) SendNullByte() error { + n, _, err := t.UnixConn.WriteMsgUnix([]byte{0}, nil, nil) + if err != nil { + return err + } + if n != 1 { + return io.ErrShortWrite + } + return nil +} diff --git a/vendor/github.com/golang/snappy/.gitignore b/vendor/github.com/golang/snappy/.gitignore deleted file mode 100644 index 042091d9b..000000000 --- a/vendor/github.com/golang/snappy/.gitignore +++ /dev/null @@ -1,16 +0,0 @@ -cmd/snappytool/snappytool -testdata/bench - -# These explicitly listed benchmark data files are for an obsolete version of -# snappy_test.go. -testdata/alice29.txt -testdata/asyoulik.txt -testdata/fireworks.jpeg -testdata/geo.protodata -testdata/html -testdata/html_x_4 -testdata/kppkn.gtb -testdata/lcet10.txt -testdata/paper-100k.pdf -testdata/plrabn12.txt -testdata/urls.10K diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS deleted file mode 100644 index 203e84eba..000000000 --- a/vendor/github.com/golang/snappy/AUTHORS +++ /dev/null @@ -1,17 +0,0 @@ -# This is the official list of Snappy-Go authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS files. -# See the latter for an explanation. - -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# Please keep the list sorted. - -Amazon.com, Inc -Damian Gryski -Google Inc. -Jan Mercl <0xjnml@gmail.com> -Klaus Post -Rodolfo Carvalho -Sebastien Binet diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS deleted file mode 100644 index d9914732b..000000000 --- a/vendor/github.com/golang/snappy/CONTRIBUTORS +++ /dev/null @@ -1,39 +0,0 @@ -# This is the official list of people who can contribute -# (and typically have contributed) code to the Snappy-Go repository. -# The AUTHORS file lists the copyright holders; this file -# lists people. For example, Google employees are listed here -# but not in AUTHORS, because Google holds the copyright. -# -# The submission process automatically checks to make sure -# that people submitting code are listed in this file (by email address). -# -# Names should be added to this file only after verifying that -# the individual or the individual's organization has agreed to -# the appropriate Contributor License Agreement, found here: -# -# http://code.google.com/legal/individual-cla-v1.0.html -# http://code.google.com/legal/corporate-cla-v1.0.html -# -# The agreement for individuals can be filled out on the web. 
-# -# When adding J Random Contributor's name to this file, -# either J's name or J's organization's name should be -# added to the AUTHORS file, depending on whether the -# individual or corporate CLA was used. - -# Names should be added to this file like so: -# Name - -# Please keep the list sorted. - -Damian Gryski -Jan Mercl <0xjnml@gmail.com> -Jonathan Swinney -Kai Backman -Klaus Post -Marc-Antoine Ruel -Nigel Tao -Rob Pike -Rodolfo Carvalho -Russ Cox -Sebastien Binet diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README deleted file mode 100644 index cea12879a..000000000 --- a/vendor/github.com/golang/snappy/README +++ /dev/null @@ -1,107 +0,0 @@ -The Snappy compression format in the Go programming language. - -To download and install from source: -$ go get github.com/golang/snappy - -Unless otherwise noted, the Snappy-Go source files are distributed -under the BSD-style license found in the LICENSE file. - - - -Benchmarks. - -The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten -or so files, the same set used by the C++ Snappy code (github.com/google/snappy -and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @ -3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29: - -"go test -test.bench=." - -_UFlat0-8 2.19GB/s ± 0% html -_UFlat1-8 1.41GB/s ± 0% urls -_UFlat2-8 23.5GB/s ± 2% jpg -_UFlat3-8 1.91GB/s ± 0% jpg_200 -_UFlat4-8 14.0GB/s ± 1% pdf -_UFlat5-8 1.97GB/s ± 0% html4 -_UFlat6-8 814MB/s ± 0% txt1 -_UFlat7-8 785MB/s ± 0% txt2 -_UFlat8-8 857MB/s ± 0% txt3 -_UFlat9-8 719MB/s ± 1% txt4 -_UFlat10-8 2.84GB/s ± 0% pb -_UFlat11-8 1.05GB/s ± 0% gaviota - -_ZFlat0-8 1.04GB/s ± 0% html -_ZFlat1-8 534MB/s ± 0% urls -_ZFlat2-8 15.7GB/s ± 1% jpg -_ZFlat3-8 740MB/s ± 3% jpg_200 -_ZFlat4-8 9.20GB/s ± 1% pdf -_ZFlat5-8 991MB/s ± 0% html4 -_ZFlat6-8 379MB/s ± 0% txt1 -_ZFlat7-8 352MB/s ± 0% txt2 -_ZFlat8-8 396MB/s ± 1% txt3 -_ZFlat9-8 327MB/s ± 1% txt4 -_ZFlat10-8 1.33GB/s ± 1% pb -_ZFlat11-8 605MB/s ± 1% gaviota - - - -"go test -test.bench=. 
-tags=noasm" - -_UFlat0-8 621MB/s ± 2% html -_UFlat1-8 494MB/s ± 1% urls -_UFlat2-8 23.2GB/s ± 1% jpg -_UFlat3-8 1.12GB/s ± 1% jpg_200 -_UFlat4-8 4.35GB/s ± 1% pdf -_UFlat5-8 609MB/s ± 0% html4 -_UFlat6-8 296MB/s ± 0% txt1 -_UFlat7-8 288MB/s ± 0% txt2 -_UFlat8-8 309MB/s ± 1% txt3 -_UFlat9-8 280MB/s ± 1% txt4 -_UFlat10-8 753MB/s ± 0% pb -_UFlat11-8 400MB/s ± 0% gaviota - -_ZFlat0-8 409MB/s ± 1% html -_ZFlat1-8 250MB/s ± 1% urls -_ZFlat2-8 12.3GB/s ± 1% jpg -_ZFlat3-8 132MB/s ± 0% jpg_200 -_ZFlat4-8 2.92GB/s ± 0% pdf -_ZFlat5-8 405MB/s ± 1% html4 -_ZFlat6-8 179MB/s ± 1% txt1 -_ZFlat7-8 170MB/s ± 1% txt2 -_ZFlat8-8 189MB/s ± 1% txt3 -_ZFlat9-8 164MB/s ± 1% txt4 -_ZFlat10-8 479MB/s ± 1% pb -_ZFlat11-8 270MB/s ± 1% gaviota - - - -For comparison (Go's encoded output is byte-for-byte identical to C++'s), here -are the numbers from C++ Snappy's - -make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log - -BM_UFlat/0 2.4GB/s html -BM_UFlat/1 1.4GB/s urls -BM_UFlat/2 21.8GB/s jpg -BM_UFlat/3 1.5GB/s jpg_200 -BM_UFlat/4 13.3GB/s pdf -BM_UFlat/5 2.1GB/s html4 -BM_UFlat/6 1.0GB/s txt1 -BM_UFlat/7 959.4MB/s txt2 -BM_UFlat/8 1.0GB/s txt3 -BM_UFlat/9 864.5MB/s txt4 -BM_UFlat/10 2.9GB/s pb -BM_UFlat/11 1.2GB/s gaviota - -BM_ZFlat/0 944.3MB/s html (22.31 %) -BM_ZFlat/1 501.6MB/s urls (47.78 %) -BM_ZFlat/2 14.3GB/s jpg (99.95 %) -BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %) -BM_ZFlat/4 8.3GB/s pdf (83.30 %) -BM_ZFlat/5 903.5MB/s html4 (22.52 %) -BM_ZFlat/6 336.0MB/s txt1 (57.88 %) -BM_ZFlat/7 312.3MB/s txt2 (61.91 %) -BM_ZFlat/8 353.1MB/s txt3 (54.99 %) -BM_ZFlat/9 289.9MB/s txt4 (66.26 %) -BM_ZFlat/10 1.2GB/s pb (19.68 %) -BM_ZFlat/11 527.4MB/s gaviota (37.72 %) diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s deleted file mode 100644 index e6179f65e..000000000 --- a/vendor/github.com/golang/snappy/decode_amd64.s +++ /dev/null @@ -1,490 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// The asm code generally follows the pure Go code in decode_other.go, except -// where marked with a "!!!". - -// func decode(dst, src []byte) int -// -// All local variables fit into registers. The non-zero stack size is only to -// spill registers and push args when issuing a CALL. The register allocation: -// - AX scratch -// - BX scratch -// - CX length or x -// - DX offset -// - SI &src[s] -// - DI &dst[d] -// + R8 dst_base -// + R9 dst_len -// + R10 dst_base + dst_len -// + R11 src_base -// + R12 src_len -// + R13 src_base + src_len -// - R14 used by doCopy -// - R15 used by doCopy -// -// The registers R8-R13 (marked with a "+") are set at the start of the -// function, and after a CALL returns, and are not otherwise modified. -// -// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. -// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. -TEXT ·decode(SB), NOSPLIT, $48-56 - // Initialize SI, DI and R8-R13. 
- MOVQ dst_base+0(FP), R8 - MOVQ dst_len+8(FP), R9 - MOVQ R8, DI - MOVQ R8, R10 - ADDQ R9, R10 - MOVQ src_base+24(FP), R11 - MOVQ src_len+32(FP), R12 - MOVQ R11, SI - MOVQ R11, R13 - ADDQ R12, R13 - -loop: - // for s < len(src) - CMPQ SI, R13 - JEQ end - - // CX = uint32(src[s]) - // - // switch src[s] & 0x03 - MOVBLZX (SI), CX - MOVL CX, BX - ANDL $3, BX - CMPL BX, $1 - JAE tagCopy - - // ---------------------------------------- - // The code below handles literal tags. - - // case tagLiteral: - // x := uint32(src[s] >> 2) - // switch - SHRL $2, CX - CMPL CX, $60 - JAE tagLit60Plus - - // case x < 60: - // s++ - INCQ SI - -doLit: - // This is the end of the inner "switch", when we have a literal tag. - // - // We assume that CX == x and x fits in a uint32, where x is the variable - // used in the pure Go decode_other.go code. - - // length = int(x) + 1 - // - // Unlike the pure Go code, we don't need to check if length <= 0 because - // CX can hold 64 bits, so the increment cannot overflow. - INCQ CX - - // Prepare to check if copying length bytes will run past the end of dst or - // src. - // - // AX = len(dst) - d - // BX = len(src) - s - MOVQ R10, AX - SUBQ DI, AX - MOVQ R13, BX - SUBQ SI, BX - - // !!! Try a faster technique for short (16 or fewer bytes) copies. - // - // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { - // goto callMemmove // Fall back on calling runtime·memmove. - // } - // - // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s - // against 21 instead of 16, because it cannot assume that all of its input - // is contiguous in memory and so it needs to leave enough source bytes to - // read the next tag without refilling buffers, but Go's Decode assumes - // contiguousness (the src argument is a []byte). - CMPQ CX, $16 - JGT callMemmove - CMPQ AX, $16 - JLT callMemmove - CMPQ BX, $16 - JLT callMemmove - - // !!! Implement the copy from src to dst as a 16-byte load and store. - // (Decode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only length bytes, but that's - // OK. If the input is a valid Snappy encoding then subsequent iterations - // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a - // non-nil error), so the overrun will be ignored. - // - // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. - MOVOU 0(SI), X0 - MOVOU X0, 0(DI) - - // d += length - // s += length - ADDQ CX, DI - ADDQ CX, SI - JMP loop - -callMemmove: - // if length > len(dst)-d || length > len(src)-s { etc } - CMPQ CX, AX - JGT errCorrupt - CMPQ CX, BX - JGT errCorrupt - - // copy(dst[d:], src[s:s+length]) - // - // This means calling runtime·memmove(&dst[d], &src[s], length), so we push - // DI, SI and CX as arguments. Coincidentally, we also need to spill those - // three registers to the stack, to save local variables across the CALL. - MOVQ DI, 0(SP) - MOVQ SI, 8(SP) - MOVQ CX, 16(SP) - MOVQ DI, 24(SP) - MOVQ SI, 32(SP) - MOVQ CX, 40(SP) - CALL runtime·memmove(SB) - - // Restore local variables: unspill registers from the stack and - // re-calculate R8-R13. 
- MOVQ 24(SP), DI - MOVQ 32(SP), SI - MOVQ 40(SP), CX - MOVQ dst_base+0(FP), R8 - MOVQ dst_len+8(FP), R9 - MOVQ R8, R10 - ADDQ R9, R10 - MOVQ src_base+24(FP), R11 - MOVQ src_len+32(FP), R12 - MOVQ R11, R13 - ADDQ R12, R13 - - // d += length - // s += length - ADDQ CX, DI - ADDQ CX, SI - JMP loop - -tagLit60Plus: - // !!! This fragment does the - // - // s += x - 58; if uint(s) > uint(len(src)) { etc } - // - // checks. In the asm version, we code it once instead of once per switch case. - ADDQ CX, SI - SUBQ $58, SI - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // case x == 60: - CMPL CX, $61 - JEQ tagLit61 - JA tagLit62Plus - - // x = uint32(src[s-1]) - MOVBLZX -1(SI), CX - JMP doLit - -tagLit61: - // case x == 61: - // x = uint32(src[s-2]) | uint32(src[s-1])<<8 - MOVWLZX -2(SI), CX - JMP doLit - -tagLit62Plus: - CMPL CX, $62 - JA tagLit63 - - // case x == 62: - // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - MOVWLZX -3(SI), CX - MOVBLZX -1(SI), BX - SHLL $16, BX - ORL BX, CX - JMP doLit - -tagLit63: - // case x == 63: - // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - MOVL -4(SI), CX - JMP doLit - -// The code above handles literal tags. -// ---------------------------------------- -// The code below handles copy tags. - -tagCopy4: - // case tagCopy4: - // s += 5 - ADDQ $5, SI - - // if uint(s) > uint(len(src)) { etc } - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // length = 1 + int(src[s-5])>>2 - SHRQ $2, CX - INCQ CX - - // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - MOVLQZX -4(SI), DX - JMP doCopy - -tagCopy2: - // case tagCopy2: - // s += 3 - ADDQ $3, SI - - // if uint(s) > uint(len(src)) { etc } - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // length = 1 + int(src[s-3])>>2 - SHRQ $2, CX - INCQ CX - - // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - MOVWQZX -2(SI), DX - JMP doCopy - -tagCopy: - // We have a copy tag. We assume that: - // - BX == src[s] & 0x03 - // - CX == src[s] - CMPQ BX, $2 - JEQ tagCopy2 - JA tagCopy4 - - // case tagCopy1: - // s += 2 - ADDQ $2, SI - - // if uint(s) > uint(len(src)) { etc } - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - MOVQ CX, DX - ANDQ $0xe0, DX - SHLQ $3, DX - MOVBQZX -1(SI), BX - ORQ BX, DX - - // length = 4 + int(src[s-2])>>2&0x7 - SHRQ $2, CX - ANDQ $7, CX - ADDQ $4, CX - -doCopy: - // This is the end of the outer "switch", when we have a copy tag. - // - // We assume that: - // - CX == length && CX > 0 - // - DX == offset - - // if offset <= 0 { etc } - CMPQ DX, $0 - JLE errCorrupt - - // if d < offset { etc } - MOVQ DI, BX - SUBQ R8, BX - CMPQ BX, DX - JLT errCorrupt - - // if length > len(dst)-d { etc } - MOVQ R10, BX - SUBQ DI, BX - CMPQ CX, BX - JGT errCorrupt - - // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length - // - // Set: - // - R14 = len(dst)-d - // - R15 = &dst[d-offset] - MOVQ R10, R14 - SUBQ DI, R14 - MOVQ DI, R15 - SUBQ DX, R15 - - // !!! Try a faster technique for short (16 or fewer bytes) forward copies. - // - // First, try using two 8-byte load/stores, similar to the doLit technique - // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is - // still OK if offset >= 8. 
Note that this has to be two 8-byte load/stores - // and not one 16-byte load/store, and the first store has to be before the - // second load, due to the overlap if offset is in the range [8, 16). - // - // if length > 16 || offset < 8 || len(dst)-d < 16 { - // goto slowForwardCopy - // } - // copy 16 bytes - // d += length - CMPQ CX, $16 - JGT slowForwardCopy - CMPQ DX, $8 - JLT slowForwardCopy - CMPQ R14, $16 - JLT slowForwardCopy - MOVQ 0(R15), AX - MOVQ AX, 0(DI) - MOVQ 8(R15), BX - MOVQ BX, 8(DI) - ADDQ CX, DI - JMP loop - -slowForwardCopy: - // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we - // can still try 8-byte load stores, provided we can overrun up to 10 extra - // bytes. As above, the overrun will be fixed up by subsequent iterations - // of the outermost loop. - // - // The C++ snappy code calls this technique IncrementalCopyFastPath. Its - // commentary says: - // - // ---- - // - // The main part of this loop is a simple copy of eight bytes at a time - // until we've copied (at least) the requested amount of bytes. However, - // if d and d-offset are less than eight bytes apart (indicating a - // repeating pattern of length < 8), we first need to expand the pattern in - // order to get the correct results. For instance, if the buffer looks like - // this, with the eight-byte and patterns marked as - // intervals: - // - // abxxxxxxxxxxxx - // [------] d-offset - // [------] d - // - // a single eight-byte copy from to will repeat the pattern - // once, after which we can move two bytes without moving : - // - // ababxxxxxxxxxx - // [------] d-offset - // [------] d - // - // and repeat the exercise until the two no longer overlap. - // - // This allows us to do very well in the special case of one single byte - // repeated many times, without taking a big hit for more general cases. - // - // The worst case of extra writing past the end of the match occurs when - // offset == 1 and length == 1; the last copy will read from byte positions - // [0..7] and write to [4..11], whereas it was only supposed to write to - // position 1. Thus, ten excess bytes. - // - // ---- - // - // That "10 byte overrun" worst case is confirmed by Go's - // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy - // and finishSlowForwardCopy algorithm. - // - // if length > len(dst)-d-10 { - // goto verySlowForwardCopy - // } - SUBQ $10, R14 - CMPQ CX, R14 - JGT verySlowForwardCopy - -makeOffsetAtLeast8: - // !!! As above, expand the pattern so that offset >= 8 and we can use - // 8-byte load/stores. - // - // for offset < 8 { - // copy 8 bytes from dst[d-offset:] to dst[d:] - // length -= offset - // d += offset - // offset += offset - // // The two previous lines together means that d-offset, and therefore - // // R15, is unchanged. - // } - CMPQ DX, $8 - JGE fixUpSlowForwardCopy - MOVQ (R15), BX - MOVQ BX, (DI) - SUBQ DX, CX - ADDQ DX, DI - ADDQ DX, DX - JMP makeOffsetAtLeast8 - -fixUpSlowForwardCopy: - // !!! Add length (which might be negative now) to d (implied by DI being - // &dst[d]) so that d ends up at the right place when we jump back to the - // top of the loop. Before we do that, though, we save DI to AX so that, if - // length is positive, copying the remaining length bytes will write to the - // right place. - MOVQ DI, AX - ADDQ CX, DI - -finishSlowForwardCopy: - // !!! Repeat 8-byte load/stores until length <= 0. 
Ending with a negative - // length means that we overrun, but as above, that will be fixed up by - // subsequent iterations of the outermost loop. - CMPQ CX, $0 - JLE loop - MOVQ (R15), BX - MOVQ BX, (AX) - ADDQ $8, R15 - ADDQ $8, AX - SUBQ $8, CX - JMP finishSlowForwardCopy - -verySlowForwardCopy: - // verySlowForwardCopy is a simple implementation of forward copy. In C - // parlance, this is a do/while loop instead of a while loop, since we know - // that length > 0. In Go syntax: - // - // for { - // dst[d] = dst[d - offset] - // d++ - // length-- - // if length == 0 { - // break - // } - // } - MOVB (R15), BX - MOVB BX, (DI) - INCQ R15 - INCQ DI - DECQ CX - JNZ verySlowForwardCopy - JMP loop - -// The code above handles copy tags. -// ---------------------------------------- - -end: - // This is the end of the "for s < len(src)". - // - // if d != len(dst) { etc } - CMPQ DI, R10 - JNE errCorrupt - - // return 0 - MOVQ $0, ret+48(FP) - RET - -errCorrupt: - // return decodeErrCodeCorrupt - MOVQ $1, ret+48(FP) - RET diff --git a/vendor/github.com/golang/snappy/decode_arm64.s b/vendor/github.com/golang/snappy/decode_arm64.s deleted file mode 100644 index 7a3ead17e..000000000 --- a/vendor/github.com/golang/snappy/decode_arm64.s +++ /dev/null @@ -1,494 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// The asm code generally follows the pure Go code in decode_other.go, except -// where marked with a "!!!". - -// func decode(dst, src []byte) int -// -// All local variables fit into registers. The non-zero stack size is only to -// spill registers and push args when issuing a CALL. The register allocation: -// - R2 scratch -// - R3 scratch -// - R4 length or x -// - R5 offset -// - R6 &src[s] -// - R7 &dst[d] -// + R8 dst_base -// + R9 dst_len -// + R10 dst_base + dst_len -// + R11 src_base -// + R12 src_len -// + R13 src_base + src_len -// - R14 used by doCopy -// - R15 used by doCopy -// -// The registers R8-R13 (marked with a "+") are set at the start of the -// function, and after a CALL returns, and are not otherwise modified. -// -// The d variable is implicitly R7 - R8, and len(dst)-d is R10 - R7. -// The s variable is implicitly R6 - R11, and len(src)-s is R13 - R6. -TEXT ·decode(SB), NOSPLIT, $56-56 - // Initialize R6, R7 and R8-R13. - MOVD dst_base+0(FP), R8 - MOVD dst_len+8(FP), R9 - MOVD R8, R7 - MOVD R8, R10 - ADD R9, R10, R10 - MOVD src_base+24(FP), R11 - MOVD src_len+32(FP), R12 - MOVD R11, R6 - MOVD R11, R13 - ADD R12, R13, R13 - -loop: - // for s < len(src) - CMP R13, R6 - BEQ end - - // R4 = uint32(src[s]) - // - // switch src[s] & 0x03 - MOVBU (R6), R4 - MOVW R4, R3 - ANDW $3, R3 - MOVW $1, R1 - CMPW R1, R3 - BGE tagCopy - - // ---------------------------------------- - // The code below handles literal tags. - - // case tagLiteral: - // x := uint32(src[s] >> 2) - // switch - MOVW $60, R1 - LSRW $2, R4, R4 - CMPW R4, R1 - BLS tagLit60Plus - - // case x < 60: - // s++ - ADD $1, R6, R6 - -doLit: - // This is the end of the inner "switch", when we have a literal tag. - // - // We assume that R4 == x and x fits in a uint32, where x is the variable - // used in the pure Go decode_other.go code. - - // length = int(x) + 1 - // - // Unlike the pure Go code, we don't need to check if length <= 0 because - // R4 can hold 64 bits, so the increment cannot overflow. 
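Both deleted decoders (amd64 above, arm64 below) accelerate the same overlapping forward copy that the verySlowForwardCopy commentary spells out. In pure Go the fallback is a byte-at-a-time loop; a sketch reconstructed from those comments, not a verbatim copy of the package's decode_other.go:

```go
// forwardCopy copies length bytes within dst from position d-offset to
// position d. When offset < length the regions overlap, and copying byte
// by byte deliberately repeats the pattern (offset 1 smears one byte).
func forwardCopy(dst []byte, d, offset, length int) {
	for end := d + length; d < end; d++ {
		dst[d] = dst[d-offset]
	}
}
```

A plain copy(dst[d:d+length], dst[d-offset:]) would not work here: Go's copy has memmove semantics and preserves the source bytes instead of repeating the pattern, which is exactly the case the assembly's pattern-expansion loop exists to handle quickly.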
- ADD $1, R4, R4 - - // Prepare to check if copying length bytes will run past the end of dst or - // src. - // - // R2 = len(dst) - d - // R3 = len(src) - s - MOVD R10, R2 - SUB R7, R2, R2 - MOVD R13, R3 - SUB R6, R3, R3 - - // !!! Try a faster technique for short (16 or fewer bytes) copies. - // - // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { - // goto callMemmove // Fall back on calling runtime·memmove. - // } - // - // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s - // against 21 instead of 16, because it cannot assume that all of its input - // is contiguous in memory and so it needs to leave enough source bytes to - // read the next tag without refilling buffers, but Go's Decode assumes - // contiguousness (the src argument is a []byte). - CMP $16, R4 - BGT callMemmove - CMP $16, R2 - BLT callMemmove - CMP $16, R3 - BLT callMemmove - - // !!! Implement the copy from src to dst as a 16-byte load and store. - // (Decode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only length bytes, but that's - // OK. If the input is a valid Snappy encoding then subsequent iterations - // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a - // non-nil error), so the overrun will be ignored. - // - // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. - LDP 0(R6), (R14, R15) - STP (R14, R15), 0(R7) - - // d += length - // s += length - ADD R4, R7, R7 - ADD R4, R6, R6 - B loop - -callMemmove: - // if length > len(dst)-d || length > len(src)-s { etc } - CMP R2, R4 - BGT errCorrupt - CMP R3, R4 - BGT errCorrupt - - // copy(dst[d:], src[s:s+length]) - // - // This means calling runtime·memmove(&dst[d], &src[s], length), so we push - // R7, R6 and R4 as arguments. Coincidentally, we also need to spill those - // three registers to the stack, to save local variables across the CALL. - MOVD R7, 8(RSP) - MOVD R6, 16(RSP) - MOVD R4, 24(RSP) - MOVD R7, 32(RSP) - MOVD R6, 40(RSP) - MOVD R4, 48(RSP) - CALL runtime·memmove(SB) - - // Restore local variables: unspill registers from the stack and - // re-calculate R8-R13. - MOVD 32(RSP), R7 - MOVD 40(RSP), R6 - MOVD 48(RSP), R4 - MOVD dst_base+0(FP), R8 - MOVD dst_len+8(FP), R9 - MOVD R8, R10 - ADD R9, R10, R10 - MOVD src_base+24(FP), R11 - MOVD src_len+32(FP), R12 - MOVD R11, R13 - ADD R12, R13, R13 - - // d += length - // s += length - ADD R4, R7, R7 - ADD R4, R6, R6 - B loop - -tagLit60Plus: - // !!! This fragment does the - // - // s += x - 58; if uint(s) > uint(len(src)) { etc } - // - // checks. In the asm version, we code it once instead of once per switch case. 
- ADD R4, R6, R6 - SUB $58, R6, R6 - MOVD R6, R3 - SUB R11, R3, R3 - CMP R12, R3 - BGT errCorrupt - - // case x == 60: - MOVW $61, R1 - CMPW R1, R4 - BEQ tagLit61 - BGT tagLit62Plus - - // x = uint32(src[s-1]) - MOVBU -1(R6), R4 - B doLit - -tagLit61: - // case x == 61: - // x = uint32(src[s-2]) | uint32(src[s-1])<<8 - MOVHU -2(R6), R4 - B doLit - -tagLit62Plus: - CMPW $62, R4 - BHI tagLit63 - - // case x == 62: - // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - MOVHU -3(R6), R4 - MOVBU -1(R6), R3 - ORR R3<<16, R4 - B doLit - -tagLit63: - // case x == 63: - // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - MOVWU -4(R6), R4 - B doLit - - // The code above handles literal tags. - // ---------------------------------------- - // The code below handles copy tags. - -tagCopy4: - // case tagCopy4: - // s += 5 - ADD $5, R6, R6 - - // if uint(s) > uint(len(src)) { etc } - MOVD R6, R3 - SUB R11, R3, R3 - CMP R12, R3 - BGT errCorrupt - - // length = 1 + int(src[s-5])>>2 - MOVD $1, R1 - ADD R4>>2, R1, R4 - - // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - MOVWU -4(R6), R5 - B doCopy - -tagCopy2: - // case tagCopy2: - // s += 3 - ADD $3, R6, R6 - - // if uint(s) > uint(len(src)) { etc } - MOVD R6, R3 - SUB R11, R3, R3 - CMP R12, R3 - BGT errCorrupt - - // length = 1 + int(src[s-3])>>2 - MOVD $1, R1 - ADD R4>>2, R1, R4 - - // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - MOVHU -2(R6), R5 - B doCopy - -tagCopy: - // We have a copy tag. We assume that: - // - R3 == src[s] & 0x03 - // - R4 == src[s] - CMP $2, R3 - BEQ tagCopy2 - BGT tagCopy4 - - // case tagCopy1: - // s += 2 - ADD $2, R6, R6 - - // if uint(s) > uint(len(src)) { etc } - MOVD R6, R3 - SUB R11, R3, R3 - CMP R12, R3 - BGT errCorrupt - - // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - MOVD R4, R5 - AND $0xe0, R5 - MOVBU -1(R6), R3 - ORR R5<<3, R3, R5 - - // length = 4 + int(src[s-2])>>2&0x7 - MOVD $7, R1 - AND R4>>2, R1, R4 - ADD $4, R4, R4 - -doCopy: - // This is the end of the outer "switch", when we have a copy tag. - // - // We assume that: - // - R4 == length && R4 > 0 - // - R5 == offset - - // if offset <= 0 { etc } - MOVD $0, R1 - CMP R1, R5 - BLE errCorrupt - - // if d < offset { etc } - MOVD R7, R3 - SUB R8, R3, R3 - CMP R5, R3 - BLT errCorrupt - - // if length > len(dst)-d { etc } - MOVD R10, R3 - SUB R7, R3, R3 - CMP R3, R4 - BGT errCorrupt - - // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length - // - // Set: - // - R14 = len(dst)-d - // - R15 = &dst[d-offset] - MOVD R10, R14 - SUB R7, R14, R14 - MOVD R7, R15 - SUB R5, R15, R15 - - // !!! Try a faster technique for short (16 or fewer bytes) forward copies. - // - // First, try using two 8-byte load/stores, similar to the doLit technique - // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is - // still OK if offset >= 8. Note that this has to be two 8-byte load/stores - // and not one 16-byte load/store, and the first store has to be before the - // second load, due to the overlap if offset is in the range [8, 16). - // - // if length > 16 || offset < 8 || len(dst)-d < 16 { - // goto slowForwardCopy - // } - // copy 16 bytes - // d += length - CMP $16, R4 - BGT slowForwardCopy - CMP $8, R5 - BLT slowForwardCopy - CMP $16, R14 - BLT slowForwardCopy - MOVD 0(R15), R2 - MOVD R2, 0(R7) - MOVD 8(R15), R3 - MOVD R3, 8(R7) - ADD R4, R7, R7 - B loop - -slowForwardCopy: - // !!! 
If the forward copy is longer than 16 bytes, or if offset < 8, we - // can still try 8-byte load stores, provided we can overrun up to 10 extra - // bytes. As above, the overrun will be fixed up by subsequent iterations - // of the outermost loop. - // - // The C++ snappy code calls this technique IncrementalCopyFastPath. Its - // commentary says: - // - // ---- - // - // The main part of this loop is a simple copy of eight bytes at a time - // until we've copied (at least) the requested amount of bytes. However, - // if d and d-offset are less than eight bytes apart (indicating a - // repeating pattern of length < 8), we first need to expand the pattern in - // order to get the correct results. For instance, if the buffer looks like - // this, with the eight-byte and patterns marked as - // intervals: - // - // abxxxxxxxxxxxx - // [------] d-offset - // [------] d - // - // a single eight-byte copy from to will repeat the pattern - // once, after which we can move two bytes without moving : - // - // ababxxxxxxxxxx - // [------] d-offset - // [------] d - // - // and repeat the exercise until the two no longer overlap. - // - // This allows us to do very well in the special case of one single byte - // repeated many times, without taking a big hit for more general cases. - // - // The worst case of extra writing past the end of the match occurs when - // offset == 1 and length == 1; the last copy will read from byte positions - // [0..7] and write to [4..11], whereas it was only supposed to write to - // position 1. Thus, ten excess bytes. - // - // ---- - // - // That "10 byte overrun" worst case is confirmed by Go's - // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy - // and finishSlowForwardCopy algorithm. - // - // if length > len(dst)-d-10 { - // goto verySlowForwardCopy - // } - SUB $10, R14, R14 - CMP R14, R4 - BGT verySlowForwardCopy - -makeOffsetAtLeast8: - // !!! As above, expand the pattern so that offset >= 8 and we can use - // 8-byte load/stores. - // - // for offset < 8 { - // copy 8 bytes from dst[d-offset:] to dst[d:] - // length -= offset - // d += offset - // offset += offset - // // The two previous lines together means that d-offset, and therefore - // // R15, is unchanged. - // } - CMP $8, R5 - BGE fixUpSlowForwardCopy - MOVD (R15), R3 - MOVD R3, (R7) - SUB R5, R4, R4 - ADD R5, R7, R7 - ADD R5, R5, R5 - B makeOffsetAtLeast8 - -fixUpSlowForwardCopy: - // !!! Add length (which might be negative now) to d (implied by R7 being - // &dst[d]) so that d ends up at the right place when we jump back to the - // top of the loop. Before we do that, though, we save R7 to R2 so that, if - // length is positive, copying the remaining length bytes will write to the - // right place. - MOVD R7, R2 - ADD R4, R7, R7 - -finishSlowForwardCopy: - // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative - // length means that we overrun, but as above, that will be fixed up by - // subsequent iterations of the outermost loop. - MOVD $0, R1 - CMP R1, R4 - BLE loop - MOVD (R15), R3 - MOVD R3, (R2) - ADD $8, R15, R15 - ADD $8, R2, R2 - SUB $8, R4, R4 - B finishSlowForwardCopy - -verySlowForwardCopy: - // verySlowForwardCopy is a simple implementation of forward copy. In C - // parlance, this is a do/while loop instead of a while loop, since we know - // that length > 0. 
In Go syntax: - // - // for { - // dst[d] = dst[d - offset] - // d++ - // length-- - // if length == 0 { - // break - // } - // } - MOVB (R15), R3 - MOVB R3, (R7) - ADD $1, R15, R15 - ADD $1, R7, R7 - SUB $1, R4, R4 - CBNZ R4, verySlowForwardCopy - B loop - - // The code above handles copy tags. - // ---------------------------------------- - -end: - // This is the end of the "for s < len(src)". - // - // if d != len(dst) { etc } - CMP R10, R7 - BNE errCorrupt - - // return 0 - MOVD $0, ret+48(FP) - RET - -errCorrupt: - // return decodeErrCodeCorrupt - MOVD $1, R2 - MOVD R2, ret+48(FP) - RET diff --git a/vendor/github.com/golang/snappy/decode_asm.go b/vendor/github.com/golang/snappy/decode_asm.go deleted file mode 100644 index 7082b3491..000000000 --- a/vendor/github.com/golang/snappy/decode_asm.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm -// +build amd64 arm64 - -package snappy - -// decode has the same semantics as in decode_other.go. -// -//go:noescape -func decode(dst, src []byte) int diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s deleted file mode 100644 index adfd979fe..000000000 --- a/vendor/github.com/golang/snappy/encode_amd64.s +++ /dev/null @@ -1,730 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a -// Go toolchain regression. See https://github.com/golang/go/issues/15426 and -// https://github.com/golang/snappy/issues/29 -// -// As a workaround, the package was built with a known good assembler, and -// those instructions were disassembled by "objdump -d" to yield the -// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 -// style comments, in AT&T asm syntax. Note that rsp here is a physical -// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). -// The instructions were then encoded as "BYTE $0x.." sequences, which assemble -// fine on Go 1.6. - -// The asm code generally follows the pure Go code in encode_other.go, except -// where marked with a "!!!". - -// ---------------------------------------------------------------------------- - -// func emitLiteral(dst, lit []byte) int -// -// All local variables fit into registers. The register allocation: -// - AX len(lit) -// - BX n -// - DX return value -// - DI &dst[i] -// - R10 &lit[0] -// -// The 24 bytes of stack space is to call runtime·memmove. -// -// The unusual register allocation of local variables, such as R10 for the -// source pointer, matches the allocation used at the call site in encodeBlock, -// which makes it easier to manually inline this function. 
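The emitLiteral routine deleted below implements Snappy's literal-tag encoding: a length of at most 60 is packed into the tag byte itself, while longer literals spill the length into one or two trailing bytes (the 0xf0/0xf4 constants in the assembly). A Go sketch of that scheme, reconstructed from the register comments rather than copied from the package's encode_other.go:

```go
const tagLiteral = 0x00

// emitLiteral writes a literal chunk to dst and returns the number of
// bytes written. It assumes len(lit)-1 fits in two bytes, matching the
// MOVW in the deleted assembly's threeBytes case.
func emitLiteral(dst, lit []byte) int {
	i, n := 0, len(lit)-1
	switch {
	case n < 60: // length packed into the tag byte
		dst[0] = uint8(n)<<2 | tagLiteral
		i = 1
	case n < 1<<8: // 0xf0: one trailing length byte
		dst[0] = 60<<2 | tagLiteral
		dst[1] = uint8(n)
		i = 2
	default: // 0xf4: two trailing length bytes, little-endian
		dst[0] = 61<<2 | tagLiteral
		dst[1] = uint8(n)
		dst[2] = uint8(n >> 8)
		i = 3
	}
	return i + copy(dst[i:], lit)
}
```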
-TEXT ·emitLiteral(SB), NOSPLIT, $24-56 - MOVQ dst_base+0(FP), DI - MOVQ lit_base+24(FP), R10 - MOVQ lit_len+32(FP), AX - MOVQ AX, DX - MOVL AX, BX - SUBL $1, BX - - CMPL BX, $60 - JLT oneByte - CMPL BX, $256 - JLT twoBytes - -threeBytes: - MOVB $0xf4, 0(DI) - MOVW BX, 1(DI) - ADDQ $3, DI - ADDQ $3, DX - JMP memmove - -twoBytes: - MOVB $0xf0, 0(DI) - MOVB BX, 1(DI) - ADDQ $2, DI - ADDQ $2, DX - JMP memmove - -oneByte: - SHLB $2, BX - MOVB BX, 0(DI) - ADDQ $1, DI - ADDQ $1, DX - -memmove: - MOVQ DX, ret+48(FP) - - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // DI, R10 and AX as arguments. - MOVQ DI, 0(SP) - MOVQ R10, 8(SP) - MOVQ AX, 16(SP) - CALL runtime·memmove(SB) - RET - -// ---------------------------------------------------------------------------- - -// func emitCopy(dst []byte, offset, length int) int -// -// All local variables fit into registers. The register allocation: -// - AX length -// - SI &dst[0] -// - DI &dst[i] -// - R11 offset -// -// The unusual register allocation of local variables, such as R11 for the -// offset, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. -TEXT ·emitCopy(SB), NOSPLIT, $0-48 - MOVQ dst_base+0(FP), DI - MOVQ DI, SI - MOVQ offset+24(FP), R11 - MOVQ length+32(FP), AX - -loop0: - // for length >= 68 { etc } - CMPL AX, $68 - JLT step1 - - // Emit a length 64 copy, encoded as 3 bytes. - MOVB $0xfe, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $64, AX - JMP loop0 - -step1: - // if length > 64 { etc } - CMPL AX, $64 - JLE step2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVB $0xee, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $60, AX - -step2: - // if length >= 12 || offset >= 2048 { goto step3 } - CMPL AX, $12 - JGE step3 - CMPL R11, $2048 - JGE step3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(DI) - SHRL $8, R11 - SHLB $5, R11 - SUBB $4, AX - SHLB $2, AX - ORB AX, R11 - ORB $1, R11 - MOVB R11, 0(DI) - ADDQ $2, DI - - // Return the number of bytes written. - SUBQ SI, DI - MOVQ DI, ret+40(FP) - RET - -step3: - // Emit the remaining copy, encoded as 3 bytes. - SUBL $1, AX - SHLB $2, AX - ORB $2, AX - MOVB AX, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - - // Return the number of bytes written. - SUBQ SI, DI - MOVQ DI, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func extendMatch(src []byte, i, j int) int -// -// All local variables fit into registers. The register allocation: -// - DX &src[0] -// - SI &src[j] -// - R13 &src[len(src) - 8] -// - R14 &src[len(src)] -// - R15 &src[i] -// -// The unusual register allocation of local variables, such as R15 for a source -// pointer, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. -TEXT ·extendMatch(SB), NOSPLIT, $0-48 - MOVQ src_base+0(FP), DX - MOVQ src_len+8(FP), R14 - MOVQ i+24(FP), R15 - MOVQ j+32(FP), SI - ADDQ DX, R14 - ADDQ DX, R15 - ADDQ DX, SI - MOVQ R14, R13 - SUBQ $8, R13 - -cmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. - CMPQ SI, R13 - JA cmp1 - MOVQ (R15), AX - MOVQ (SI), BX - CMPQ AX, BX - JNE bsf - ADDQ $8, R15 - ADDQ $8, SI - JMP cmp8 - -bsf: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. 
The BSF instruction finds the - // least significant 1 bit, the amd64 architecture is little-endian, and - // the shift by 3 converts a bit index to a byte index. - XORQ AX, BX - BSFQ BX, BX - SHRQ $3, BX - ADDQ BX, SI - - // Convert from &src[ret] to ret. - SUBQ DX, SI - MOVQ SI, ret+40(FP) - RET - -cmp1: - // In src's tail, compare 1 byte at a time. - CMPQ SI, R14 - JAE extendMatchEnd - MOVB (R15), AX - MOVB (SI), BX - CMPB AX, BX - JNE extendMatchEnd - ADDQ $1, R15 - ADDQ $1, SI - JMP cmp1 - -extendMatchEnd: - // Convert from &src[ret] to ret. - SUBQ DX, SI - MOVQ SI, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func encodeBlock(dst, src []byte) (d int) -// -// All local variables fit into registers, other than "var table". The register -// allocation: -// - AX . . -// - BX . . -// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). -// - DX 64 &src[0], tableSize -// - SI 72 &src[s] -// - DI 80 &dst[d] -// - R9 88 sLimit -// - R10 . &src[nextEmit] -// - R11 96 prevHash, currHash, nextHash, offset -// - R12 104 &src[base], skip -// - R13 . &src[nextS], &src[len(src) - 8] -// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x -// - R15 112 candidate -// -// The second column (56, 64, etc) is the stack offset to spill the registers -// when calling other functions. We could pack this slightly tighter, but it's -// simpler to have a dedicated spill map independent of the function called. -// -// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An -// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill -// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. -TEXT ·encodeBlock(SB), 0, $32888-56 - MOVQ dst_base+0(FP), DI - MOVQ src_base+24(FP), SI - MOVQ src_len+32(FP), R14 - - // shift, tableSize := uint32(32-8), 1<<8 - MOVQ $24, CX - MOVQ $256, DX - -calcShift: - // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - // shift-- - // } - CMPQ DX, $16384 - JGE varTable - CMPQ DX, R14 - JGE varTable - SUBQ $1, CX - SHLQ $1, DX - JMP calcShift - -varTable: - // var table [maxTableSize]uint16 - // - // In the asm code, unlike the Go code, we can zero-initialize only the - // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU - // writes 16 bytes, so we can do only tableSize/8 writes instead of the - // 2048 writes that would zero-initialize all of table's 32768 bytes. - SHRQ $3, DX - LEAQ table-32768(SP), BX - PXOR X0, X0 - -memclr: - MOVOU X0, 0(BX) - ADDQ $16, BX - SUBQ $1, DX - JNZ memclr - - // !!! DX = &src[0] - MOVQ SI, DX - - // sLimit := len(src) - inputMargin - MOVQ R14, R9 - SUBQ $15, R9 - - // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't - // change for the rest of the function. 
- MOVQ CX, 56(SP) - MOVQ DX, 64(SP) - MOVQ R9, 88(SP) - - // nextEmit := 0 - MOVQ DX, R10 - - // s := 1 - ADDQ $1, SI - - // nextHash := hash(load32(src, s), shift) - MOVL 0(SI), R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - -outer: - // for { etc } - - // skip := 32 - MOVQ $32, R12 - - // nextS := s - MOVQ SI, R13 - - // candidate := 0 - MOVQ $0, R15 - -inner0: - // for { etc } - - // s := nextS - MOVQ R13, SI - - // bytesBetweenHashLookups := skip >> 5 - MOVQ R12, R14 - SHRQ $5, R14 - - // nextS = s + bytesBetweenHashLookups - ADDQ R14, R13 - - // skip += bytesBetweenHashLookups - ADDQ R14, R12 - - // if nextS > sLimit { goto emitRemainder } - MOVQ R13, AX - SUBQ DX, AX - CMPQ AX, R9 - JA emitRemainder - - // candidate = int(table[nextHash]) - // XXX: MOVWQZX table-32768(SP)(R11*2), R15 - // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 - BYTE $0x4e - BYTE $0x0f - BYTE $0xb7 - BYTE $0x7c - BYTE $0x5c - BYTE $0x78 - - // table[nextHash] = uint16(s) - MOVQ SI, AX - SUBQ DX, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // nextHash = hash(load32(src, nextS), shift) - MOVL 0(R13), R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // if load32(src, s) != load32(src, candidate) { continue } break - MOVL 0(SI), AX - MOVL (DX)(R15*1), BX - CMPL AX, BX - JNE inner0 - -fourByteMatch: - // As per the encode_other.go code: - // - // A 4-byte match has been found. We'll later see etc. - - // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment - // on inputMargin in encode.go. - MOVQ SI, AX - SUBQ R10, AX - CMPQ AX, $16 - JLE emitLiteralFastPath - - // ---------------------------------------- - // Begin inline of the emitLiteral call. - // - // d += emitLiteral(dst[d:], src[nextEmit:s]) - - MOVL AX, BX - SUBL $1, BX - - CMPL BX, $60 - JLT inlineEmitLiteralOneByte - CMPL BX, $256 - JLT inlineEmitLiteralTwoBytes - -inlineEmitLiteralThreeBytes: - MOVB $0xf4, 0(DI) - MOVW BX, 1(DI) - ADDQ $3, DI - JMP inlineEmitLiteralMemmove - -inlineEmitLiteralTwoBytes: - MOVB $0xf0, 0(DI) - MOVB BX, 1(DI) - ADDQ $2, DI - JMP inlineEmitLiteralMemmove - -inlineEmitLiteralOneByte: - SHLB $2, BX - MOVB BX, 0(DI) - ADDQ $1, DI - -inlineEmitLiteralMemmove: - // Spill local variables (registers) onto the stack; call; unspill. - // - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // DI, R10 and AX as arguments. - MOVQ DI, 0(SP) - MOVQ R10, 8(SP) - MOVQ AX, 16(SP) - ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". - MOVQ SI, 72(SP) - MOVQ DI, 80(SP) - MOVQ R15, 112(SP) - CALL runtime·memmove(SB) - MOVQ 56(SP), CX - MOVQ 64(SP), DX - MOVQ 72(SP), SI - MOVQ 80(SP), DI - MOVQ 88(SP), R9 - MOVQ 112(SP), R15 - JMP inner1 - -inlineEmitLiteralEnd: - // End inline of the emitLiteral call. - // ---------------------------------------- - -emitLiteralFastPath: - // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". - MOVB AX, BX - SUBB $1, BX - SHLB $2, BX - MOVB BX, (DI) - ADDQ $1, DI - - // !!! Implement the copy from lit to dst as a 16-byte load and store. - // (Encode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only len(lit) bytes, but that's - // OK. Subsequent iterations will fix up the overrun. - // - // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. 
This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. - MOVOU 0(R10), X0 - MOVOU X0, 0(DI) - ADDQ AX, DI - -inner1: - // for { etc } - - // base := s - MOVQ SI, R12 - - // !!! offset := base - candidate - MOVQ R12, R11 - SUBQ R15, R11 - SUBQ DX, R11 - - // ---------------------------------------- - // Begin inline of the extendMatch call. - // - // s = extendMatch(src, candidate+4, s+4) - - // !!! R14 = &src[len(src)] - MOVQ src_len+32(FP), R14 - ADDQ DX, R14 - - // !!! R13 = &src[len(src) - 8] - MOVQ R14, R13 - SUBQ $8, R13 - - // !!! R15 = &src[candidate + 4] - ADDQ $4, R15 - ADDQ DX, R15 - - // !!! s += 4 - ADDQ $4, SI - -inlineExtendMatchCmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. - CMPQ SI, R13 - JA inlineExtendMatchCmp1 - MOVQ (R15), AX - MOVQ (SI), BX - CMPQ AX, BX - JNE inlineExtendMatchBSF - ADDQ $8, R15 - ADDQ $8, SI - JMP inlineExtendMatchCmp8 - -inlineExtendMatchBSF: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. The BSF instruction finds the - // least significant 1 bit, the amd64 architecture is little-endian, and - // the shift by 3 converts a bit index to a byte index. - XORQ AX, BX - BSFQ BX, BX - SHRQ $3, BX - ADDQ BX, SI - JMP inlineExtendMatchEnd - -inlineExtendMatchCmp1: - // In src's tail, compare 1 byte at a time. - CMPQ SI, R14 - JAE inlineExtendMatchEnd - MOVB (R15), AX - MOVB (SI), BX - CMPB AX, BX - JNE inlineExtendMatchEnd - ADDQ $1, R15 - ADDQ $1, SI - JMP inlineExtendMatchCmp1 - -inlineExtendMatchEnd: - // End inline of the extendMatch call. - // ---------------------------------------- - - // ---------------------------------------- - // Begin inline of the emitCopy call. - // - // d += emitCopy(dst[d:], base-candidate, s-base) - - // !!! length := s - base - MOVQ SI, AX - SUBQ R12, AX - -inlineEmitCopyLoop0: - // for length >= 68 { etc } - CMPL AX, $68 - JLT inlineEmitCopyStep1 - - // Emit a length 64 copy, encoded as 3 bytes. - MOVB $0xfe, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $64, AX - JMP inlineEmitCopyLoop0 - -inlineEmitCopyStep1: - // if length > 64 { etc } - CMPL AX, $64 - JLE inlineEmitCopyStep2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVB $0xee, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $60, AX - -inlineEmitCopyStep2: - // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } - CMPL AX, $12 - JGE inlineEmitCopyStep3 - CMPL R11, $2048 - JGE inlineEmitCopyStep3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(DI) - SHRL $8, R11 - SHLB $5, R11 - SUBB $4, AX - SHLB $2, AX - ORB AX, R11 - ORB $1, R11 - MOVB R11, 0(DI) - ADDQ $2, DI - JMP inlineEmitCopyEnd - -inlineEmitCopyStep3: - // Emit the remaining copy, encoded as 3 bytes. - SUBL $1, AX - SHLB $2, AX - ORB $2, AX - MOVB AX, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - -inlineEmitCopyEnd: - // End inline of the emitCopy call. - // ---------------------------------------- - - // nextEmit = s - MOVQ SI, R10 - - // if s >= sLimit { goto emitRemainder } - MOVQ SI, AX - SUBQ DX, AX - CMPQ AX, R9 - JAE emitRemainder - - // As per the encode_other.go code: - // - // We could immediately etc. 
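The IMULL $0x1e35a7bd / SHRL CX pairs in the hash updates that follow (and the MULW R16 pairs in the arm64 listing later) are snappy's multiplicative hash of a 4-byte window. A minimal pure-Go sketch, with load32 assumed little-endian as in encode_other.go:

```go
// hash returns a table index for the 4-byte window u. Multiplying and
// keeping the top bits means larger shift values map to smaller tables.
func hash(u, shift uint32) uint32 {
	return (u * 0x1e35a7bd) >> shift
}

// load32 reads a little-endian uint32 from b[i:], matching MOVL 0(SI), R11.
func load32(b []byte, i int) uint32 {
	b = b[i : i+4] // eliminates bounds checks on the reads below
	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}
```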
- - // x := load64(src, s-1) - MOVQ -1(SI), R14 - - // prevHash := hash(uint32(x>>0), shift) - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // table[prevHash] = uint16(s-1) - MOVQ SI, AX - SUBQ DX, AX - SUBQ $1, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // currHash := hash(uint32(x>>8), shift) - SHRQ $8, R14 - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // candidate = int(table[currHash]) - // XXX: MOVWQZX table-32768(SP)(R11*2), R15 - // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 - BYTE $0x4e - BYTE $0x0f - BYTE $0xb7 - BYTE $0x7c - BYTE $0x5c - BYTE $0x78 - - // table[currHash] = uint16(s) - ADDQ $1, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // if uint32(x>>8) == load32(src, candidate) { continue } - MOVL (DX)(R15*1), BX - CMPL R14, BX - JEQ inner1 - - // nextHash = hash(uint32(x>>16), shift) - SHRQ $8, R14 - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // s++ - ADDQ $1, SI - - // break out of the inner1 for loop, i.e. continue the outer loop. - JMP outer - -emitRemainder: - // if nextEmit < len(src) { etc } - MOVQ src_len+32(FP), AX - ADDQ DX, AX - CMPQ R10, AX - JEQ encodeBlockEnd - - // d += emitLiteral(dst[d:], src[nextEmit:]) - // - // Push args. - MOVQ DI, 0(SP) - MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. - MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. - MOVQ R10, 24(SP) - SUBQ R10, AX - MOVQ AX, 32(SP) - MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. - - // Spill local variables (registers) onto the stack; call; unspill. - MOVQ DI, 80(SP) - CALL ·emitLiteral(SB) - MOVQ 80(SP), DI - - // Finish the "d +=" part of "d += emitLiteral(etc)". - ADDQ 48(SP), DI - -encodeBlockEnd: - MOVQ dst_base+0(FP), AX - SUBQ AX, DI - MOVQ DI, d+48(FP) - RET diff --git a/vendor/github.com/golang/snappy/encode_arm64.s b/vendor/github.com/golang/snappy/encode_arm64.s deleted file mode 100644 index bf83667d7..000000000 --- a/vendor/github.com/golang/snappy/encode_arm64.s +++ /dev/null @@ -1,722 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// The asm code generally follows the pure Go code in encode_other.go, except -// where marked with a "!!!". - -// ---------------------------------------------------------------------------- - -// func emitLiteral(dst, lit []byte) int -// -// All local variables fit into registers. The register allocation: -// - R3 len(lit) -// - R4 n -// - R6 return value -// - R8 &dst[i] -// - R10 &lit[0] -// -// The 32 bytes of stack space is to call runtime·memmove. -// -// The unusual register allocation of local variables, such as R10 for the -// source pointer, matches the allocation used at the call site in encodeBlock, -// which makes it easier to manually inline this function. 
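One more pure-Go reference point before the arm64 bodies: the loop0/step1/step2/step3 control flow of emitCopy (both the standalone functions and the inlineEmitCopy sections) follows roughly the sketch below. The tagCopy1/tagCopy2 constants are recalled from the snappy format, not taken from this patch.

```go
// emitCopy writes a copy chunk with the given offset and length to dst and
// returns the number of bytes written. 0xfe == 63<<2|tagCopy2 and
// 0xee == 59<<2|tagCopy2, the immediates seen in the assembly.
func emitCopy(dst []byte, offset, length int) int {
	const (
		tagCopy1 = 0x01 // 2-byte copy: 3 high offset bits, length-4 in the tag
		tagCopy2 = 0x02 // 3-byte copy: 16-bit little-endian offset
	)
	i := 0
	// loop0: emit length-64 copies while the remainder stays >= 4 bytes,
	// so the tail can still use the short encoding.
	for length >= 68 {
		dst[i+0] = 63<<2 | tagCopy2
		dst[i+1] = uint8(offset)
		dst[i+2] = uint8(offset >> 8)
		i += 3
		length -= 64
	}
	// step1: one length-60 copy if more than 64 bytes remain.
	if length > 64 {
		dst[i+0] = 59<<2 | tagCopy2
		dst[i+1] = uint8(offset)
		dst[i+2] = uint8(offset >> 8)
		i += 3
		length -= 60
	}
	// step3: the 3-byte encoding for long lengths or far offsets.
	if length >= 12 || offset >= 2048 {
		dst[i+0] = uint8(length-1)<<2 | tagCopy2
		dst[i+1] = uint8(offset)
		dst[i+2] = uint8(offset >> 8)
		return i + 3
	}
	// step2: the compact 2-byte encoding.
	dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
	dst[i+1] = uint8(offset)
	return i + 2
}
```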
-TEXT ·emitLiteral(SB), NOSPLIT, $32-56 - MOVD dst_base+0(FP), R8 - MOVD lit_base+24(FP), R10 - MOVD lit_len+32(FP), R3 - MOVD R3, R6 - MOVW R3, R4 - SUBW $1, R4, R4 - - CMPW $60, R4 - BLT oneByte - CMPW $256, R4 - BLT twoBytes - -threeBytes: - MOVD $0xf4, R2 - MOVB R2, 0(R8) - MOVW R4, 1(R8) - ADD $3, R8, R8 - ADD $3, R6, R6 - B memmove - -twoBytes: - MOVD $0xf0, R2 - MOVB R2, 0(R8) - MOVB R4, 1(R8) - ADD $2, R8, R8 - ADD $2, R6, R6 - B memmove - -oneByte: - LSLW $2, R4, R4 - MOVB R4, 0(R8) - ADD $1, R8, R8 - ADD $1, R6, R6 - -memmove: - MOVD R6, ret+48(FP) - - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // R8, R10 and R3 as arguments. - MOVD R8, 8(RSP) - MOVD R10, 16(RSP) - MOVD R3, 24(RSP) - CALL runtime·memmove(SB) - RET - -// ---------------------------------------------------------------------------- - -// func emitCopy(dst []byte, offset, length int) int -// -// All local variables fit into registers. The register allocation: -// - R3 length -// - R7 &dst[0] -// - R8 &dst[i] -// - R11 offset -// -// The unusual register allocation of local variables, such as R11 for the -// offset, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. -TEXT ·emitCopy(SB), NOSPLIT, $0-48 - MOVD dst_base+0(FP), R8 - MOVD R8, R7 - MOVD offset+24(FP), R11 - MOVD length+32(FP), R3 - -loop0: - // for length >= 68 { etc } - CMPW $68, R3 - BLT step1 - - // Emit a length 64 copy, encoded as 3 bytes. - MOVD $0xfe, R2 - MOVB R2, 0(R8) - MOVW R11, 1(R8) - ADD $3, R8, R8 - SUB $64, R3, R3 - B loop0 - -step1: - // if length > 64 { etc } - CMP $64, R3 - BLE step2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVD $0xee, R2 - MOVB R2, 0(R8) - MOVW R11, 1(R8) - ADD $3, R8, R8 - SUB $60, R3, R3 - -step2: - // if length >= 12 || offset >= 2048 { goto step3 } - CMP $12, R3 - BGE step3 - CMPW $2048, R11 - BGE step3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(R8) - LSRW $3, R11, R11 - AND $0xe0, R11, R11 - SUB $4, R3, R3 - LSLW $2, R3 - AND $0xff, R3, R3 - ORRW R3, R11, R11 - ORRW $1, R11, R11 - MOVB R11, 0(R8) - ADD $2, R8, R8 - - // Return the number of bytes written. - SUB R7, R8, R8 - MOVD R8, ret+40(FP) - RET - -step3: - // Emit the remaining copy, encoded as 3 bytes. - SUB $1, R3, R3 - AND $0xff, R3, R3 - LSLW $2, R3, R3 - ORRW $2, R3, R3 - MOVB R3, 0(R8) - MOVW R11, 1(R8) - ADD $3, R8, R8 - - // Return the number of bytes written. - SUB R7, R8, R8 - MOVD R8, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func extendMatch(src []byte, i, j int) int -// -// All local variables fit into registers. The register allocation: -// - R6 &src[0] -// - R7 &src[j] -// - R13 &src[len(src) - 8] -// - R14 &src[len(src)] -// - R15 &src[i] -// -// The unusual register allocation of local variables, such as R15 for a source -// pointer, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. -TEXT ·extendMatch(SB), NOSPLIT, $0-48 - MOVD src_base+0(FP), R6 - MOVD src_len+8(FP), R14 - MOVD i+24(FP), R15 - MOVD j+32(FP), R7 - ADD R6, R14, R14 - ADD R6, R15, R15 - ADD R6, R7, R7 - MOVD R14, R13 - SUB $8, R13, R13 - -cmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. 
- CMP R13, R7 - BHI cmp1 - MOVD (R15), R3 - MOVD (R7), R4 - CMP R4, R3 - BNE bsf - ADD $8, R15, R15 - ADD $8, R7, R7 - B cmp8 - -bsf: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. - // RBIT reverses the bit order, then CLZ counts the leading zeros, the - // combination of which finds the least significant bit which is set. - // The arm64 architecture is little-endian, and the shift by 3 converts - // a bit index to a byte index. - EOR R3, R4, R4 - RBIT R4, R4 - CLZ R4, R4 - ADD R4>>3, R7, R7 - - // Convert from &src[ret] to ret. - SUB R6, R7, R7 - MOVD R7, ret+40(FP) - RET - -cmp1: - // In src's tail, compare 1 byte at a time. - CMP R7, R14 - BLS extendMatchEnd - MOVB (R15), R3 - MOVB (R7), R4 - CMP R4, R3 - BNE extendMatchEnd - ADD $1, R15, R15 - ADD $1, R7, R7 - B cmp1 - -extendMatchEnd: - // Convert from &src[ret] to ret. - SUB R6, R7, R7 - MOVD R7, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func encodeBlock(dst, src []byte) (d int) -// -// All local variables fit into registers, other than "var table". The register -// allocation: -// - R3 . . -// - R4 . . -// - R5 64 shift -// - R6 72 &src[0], tableSize -// - R7 80 &src[s] -// - R8 88 &dst[d] -// - R9 96 sLimit -// - R10 . &src[nextEmit] -// - R11 104 prevHash, currHash, nextHash, offset -// - R12 112 &src[base], skip -// - R13 . &src[nextS], &src[len(src) - 8] -// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x -// - R15 120 candidate -// - R16 . hash constant, 0x1e35a7bd -// - R17 . &table -// - . 128 table -// -// The second column (64, 72, etc) is the stack offset to spill the registers -// when calling other functions. We could pack this slightly tighter, but it's -// simpler to have a dedicated spill map independent of the function called. -// -// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An -// extra 64 bytes, to call other functions, and an extra 64 bytes, to spill -// local variables (registers) during calls gives 32768 + 64 + 64 = 32896. -TEXT ·encodeBlock(SB), 0, $32896-56 - MOVD dst_base+0(FP), R8 - MOVD src_base+24(FP), R7 - MOVD src_len+32(FP), R14 - - // shift, tableSize := uint32(32-8), 1<<8 - MOVD $24, R5 - MOVD $256, R6 - MOVW $0xa7bd, R16 - MOVKW $(0x1e35<<16), R16 - -calcShift: - // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - // shift-- - // } - MOVD $16384, R2 - CMP R2, R6 - BGE varTable - CMP R14, R6 - BGE varTable - SUB $1, R5, R5 - LSL $1, R6, R6 - B calcShift - -varTable: - // var table [maxTableSize]uint16 - // - // In the asm code, unlike the Go code, we can zero-initialize only the - // first tableSize elements. Each uint16 element is 2 bytes and each - // iterations writes 64 bytes, so we can do only tableSize/32 writes - // instead of the 2048 writes that would zero-initialize all of table's - // 32768 bytes. This clear could overrun the first tableSize elements, but - // it won't overrun the allocated stack size. - ADD $128, RSP, R17 - MOVD R17, R4 - - // !!! R6 = &src[tableSize] - ADD R6<<1, R17, R6 - -memclr: - STP.P (ZR, ZR), 64(R4) - STP (ZR, ZR), -48(R4) - STP (ZR, ZR), -32(R4) - STP (ZR, ZR), -16(R4) - CMP R4, R6 - BHI memclr - - // !!! R6 = &src[0] - MOVD R7, R6 - - // sLimit := len(src) - inputMargin - MOVD R14, R9 - SUB $15, R9, R9 - - // !!! Pre-emptively spill R5, R6 and R9 to the stack. Their values don't - // change for the rest of the function. 
- MOVD R5, 64(RSP) - MOVD R6, 72(RSP) - MOVD R9, 96(RSP) - - // nextEmit := 0 - MOVD R6, R10 - - // s := 1 - ADD $1, R7, R7 - - // nextHash := hash(load32(src, s), shift) - MOVW 0(R7), R11 - MULW R16, R11, R11 - LSRW R5, R11, R11 - -outer: - // for { etc } - - // skip := 32 - MOVD $32, R12 - - // nextS := s - MOVD R7, R13 - - // candidate := 0 - MOVD $0, R15 - -inner0: - // for { etc } - - // s := nextS - MOVD R13, R7 - - // bytesBetweenHashLookups := skip >> 5 - MOVD R12, R14 - LSR $5, R14, R14 - - // nextS = s + bytesBetweenHashLookups - ADD R14, R13, R13 - - // skip += bytesBetweenHashLookups - ADD R14, R12, R12 - - // if nextS > sLimit { goto emitRemainder } - MOVD R13, R3 - SUB R6, R3, R3 - CMP R9, R3 - BHI emitRemainder - - // candidate = int(table[nextHash]) - MOVHU 0(R17)(R11<<1), R15 - - // table[nextHash] = uint16(s) - MOVD R7, R3 - SUB R6, R3, R3 - - MOVH R3, 0(R17)(R11<<1) - - // nextHash = hash(load32(src, nextS), shift) - MOVW 0(R13), R11 - MULW R16, R11 - LSRW R5, R11, R11 - - // if load32(src, s) != load32(src, candidate) { continue } break - MOVW 0(R7), R3 - MOVW (R6)(R15*1), R4 - CMPW R4, R3 - BNE inner0 - -fourByteMatch: - // As per the encode_other.go code: - // - // A 4-byte match has been found. We'll later see etc. - - // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment - // on inputMargin in encode.go. - MOVD R7, R3 - SUB R10, R3, R3 - CMP $16, R3 - BLE emitLiteralFastPath - - // ---------------------------------------- - // Begin inline of the emitLiteral call. - // - // d += emitLiteral(dst[d:], src[nextEmit:s]) - - MOVW R3, R4 - SUBW $1, R4, R4 - - MOVW $60, R2 - CMPW R2, R4 - BLT inlineEmitLiteralOneByte - MOVW $256, R2 - CMPW R2, R4 - BLT inlineEmitLiteralTwoBytes - -inlineEmitLiteralThreeBytes: - MOVD $0xf4, R1 - MOVB R1, 0(R8) - MOVW R4, 1(R8) - ADD $3, R8, R8 - B inlineEmitLiteralMemmove - -inlineEmitLiteralTwoBytes: - MOVD $0xf0, R1 - MOVB R1, 0(R8) - MOVB R4, 1(R8) - ADD $2, R8, R8 - B inlineEmitLiteralMemmove - -inlineEmitLiteralOneByte: - LSLW $2, R4, R4 - MOVB R4, 0(R8) - ADD $1, R8, R8 - -inlineEmitLiteralMemmove: - // Spill local variables (registers) onto the stack; call; unspill. - // - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // R8, R10 and R3 as arguments. - MOVD R8, 8(RSP) - MOVD R10, 16(RSP) - MOVD R3, 24(RSP) - - // Finish the "d +=" part of "d += emitLiteral(etc)". - ADD R3, R8, R8 - MOVD R7, 80(RSP) - MOVD R8, 88(RSP) - MOVD R15, 120(RSP) - CALL runtime·memmove(SB) - MOVD 64(RSP), R5 - MOVD 72(RSP), R6 - MOVD 80(RSP), R7 - MOVD 88(RSP), R8 - MOVD 96(RSP), R9 - MOVD 120(RSP), R15 - ADD $128, RSP, R17 - MOVW $0xa7bd, R16 - MOVKW $(0x1e35<<16), R16 - B inner1 - -inlineEmitLiteralEnd: - // End inline of the emitLiteral call. - // ---------------------------------------- - -emitLiteralFastPath: - // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". - MOVB R3, R4 - SUBW $1, R4, R4 - AND $0xff, R4, R4 - LSLW $2, R4, R4 - MOVB R4, (R8) - ADD $1, R8, R8 - - // !!! Implement the copy from lit to dst as a 16-byte load and store. - // (Encode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only len(lit) bytes, but that's - // OK. Subsequent iterations will fix up the overrun. - // - // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. 
- LDP 0(R10), (R0, R1) - STP (R0, R1), 0(R8) - ADD R3, R8, R8 - -inner1: - // for { etc } - - // base := s - MOVD R7, R12 - - // !!! offset := base - candidate - MOVD R12, R11 - SUB R15, R11, R11 - SUB R6, R11, R11 - - // ---------------------------------------- - // Begin inline of the extendMatch call. - // - // s = extendMatch(src, candidate+4, s+4) - - // !!! R14 = &src[len(src)] - MOVD src_len+32(FP), R14 - ADD R6, R14, R14 - - // !!! R13 = &src[len(src) - 8] - MOVD R14, R13 - SUB $8, R13, R13 - - // !!! R15 = &src[candidate + 4] - ADD $4, R15, R15 - ADD R6, R15, R15 - - // !!! s += 4 - ADD $4, R7, R7 - -inlineExtendMatchCmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. - CMP R13, R7 - BHI inlineExtendMatchCmp1 - MOVD (R15), R3 - MOVD (R7), R4 - CMP R4, R3 - BNE inlineExtendMatchBSF - ADD $8, R15, R15 - ADD $8, R7, R7 - B inlineExtendMatchCmp8 - -inlineExtendMatchBSF: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. - // RBIT reverses the bit order, then CLZ counts the leading zeros, the - // combination of which finds the least significant bit which is set. - // The arm64 architecture is little-endian, and the shift by 3 converts - // a bit index to a byte index. - EOR R3, R4, R4 - RBIT R4, R4 - CLZ R4, R4 - ADD R4>>3, R7, R7 - B inlineExtendMatchEnd - -inlineExtendMatchCmp1: - // In src's tail, compare 1 byte at a time. - CMP R7, R14 - BLS inlineExtendMatchEnd - MOVB (R15), R3 - MOVB (R7), R4 - CMP R4, R3 - BNE inlineExtendMatchEnd - ADD $1, R15, R15 - ADD $1, R7, R7 - B inlineExtendMatchCmp1 - -inlineExtendMatchEnd: - // End inline of the extendMatch call. - // ---------------------------------------- - - // ---------------------------------------- - // Begin inline of the emitCopy call. - // - // d += emitCopy(dst[d:], base-candidate, s-base) - - // !!! length := s - base - MOVD R7, R3 - SUB R12, R3, R3 - -inlineEmitCopyLoop0: - // for length >= 68 { etc } - MOVW $68, R2 - CMPW R2, R3 - BLT inlineEmitCopyStep1 - - // Emit a length 64 copy, encoded as 3 bytes. - MOVD $0xfe, R1 - MOVB R1, 0(R8) - MOVW R11, 1(R8) - ADD $3, R8, R8 - SUBW $64, R3, R3 - B inlineEmitCopyLoop0 - -inlineEmitCopyStep1: - // if length > 64 { etc } - MOVW $64, R2 - CMPW R2, R3 - BLE inlineEmitCopyStep2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVD $0xee, R1 - MOVB R1, 0(R8) - MOVW R11, 1(R8) - ADD $3, R8, R8 - SUBW $60, R3, R3 - -inlineEmitCopyStep2: - // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } - MOVW $12, R2 - CMPW R2, R3 - BGE inlineEmitCopyStep3 - MOVW $2048, R2 - CMPW R2, R11 - BGE inlineEmitCopyStep3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(R8) - LSRW $8, R11, R11 - LSLW $5, R11, R11 - SUBW $4, R3, R3 - AND $0xff, R3, R3 - LSLW $2, R3, R3 - ORRW R3, R11, R11 - ORRW $1, R11, R11 - MOVB R11, 0(R8) - ADD $2, R8, R8 - B inlineEmitCopyEnd - -inlineEmitCopyStep3: - // Emit the remaining copy, encoded as 3 bytes. - SUBW $1, R3, R3 - LSLW $2, R3, R3 - ORRW $2, R3, R3 - MOVB R3, 0(R8) - MOVW R11, 1(R8) - ADD $3, R8, R8 - -inlineEmitCopyEnd: - // End inline of the emitCopy call. - // ---------------------------------------- - - // nextEmit = s - MOVD R7, R10 - - // if s >= sLimit { goto emitRemainder } - MOVD R7, R3 - SUB R6, R3, R3 - CMP R3, R9 - BLS emitRemainder - - // As per the encode_other.go code: - // - // We could immediately etc. 
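The inlineExtendMatch sections above, with their 8-byte compares plus the RBIT/CLZ bit trick (BSF on amd64), are fast paths for a one-line scalar loop. In pure Go, mirroring encode_other.go, extendMatch is roughly:

```go
// extendMatch returns the largest k such that k <= len(src) and that
// src[i:i+k-j] and src[j:k] have the same contents. It assumes
// 0 <= i && i < j && j <= len(src).
func extendMatch(src []byte, i, j int) int {
	for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
	}
	return j
}
```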
- - // x := load64(src, s-1) - MOVD -1(R7), R14 - - // prevHash := hash(uint32(x>>0), shift) - MOVW R14, R11 - MULW R16, R11, R11 - LSRW R5, R11, R11 - - // table[prevHash] = uint16(s-1) - MOVD R7, R3 - SUB R6, R3, R3 - SUB $1, R3, R3 - - MOVHU R3, 0(R17)(R11<<1) - - // currHash := hash(uint32(x>>8), shift) - LSR $8, R14, R14 - MOVW R14, R11 - MULW R16, R11, R11 - LSRW R5, R11, R11 - - // candidate = int(table[currHash]) - MOVHU 0(R17)(R11<<1), R15 - - // table[currHash] = uint16(s) - ADD $1, R3, R3 - MOVHU R3, 0(R17)(R11<<1) - - // if uint32(x>>8) == load32(src, candidate) { continue } - MOVW (R6)(R15*1), R4 - CMPW R4, R14 - BEQ inner1 - - // nextHash = hash(uint32(x>>16), shift) - LSR $8, R14, R14 - MOVW R14, R11 - MULW R16, R11, R11 - LSRW R5, R11, R11 - - // s++ - ADD $1, R7, R7 - - // break out of the inner1 for loop, i.e. continue the outer loop. - B outer - -emitRemainder: - // if nextEmit < len(src) { etc } - MOVD src_len+32(FP), R3 - ADD R6, R3, R3 - CMP R3, R10 - BEQ encodeBlockEnd - - // d += emitLiteral(dst[d:], src[nextEmit:]) - // - // Push args. - MOVD R8, 8(RSP) - MOVD $0, 16(RSP) // Unnecessary, as the callee ignores it, but conservative. - MOVD $0, 24(RSP) // Unnecessary, as the callee ignores it, but conservative. - MOVD R10, 32(RSP) - SUB R10, R3, R3 - MOVD R3, 40(RSP) - MOVD R3, 48(RSP) // Unnecessary, as the callee ignores it, but conservative. - - // Spill local variables (registers) onto the stack; call; unspill. - MOVD R8, 88(RSP) - CALL ·emitLiteral(SB) - MOVD 88(RSP), R8 - - // Finish the "d +=" part of "d += emitLiteral(etc)". - MOVD 56(RSP), R1 - ADD R1, R8, R8 - -encodeBlockEnd: - MOVD dst_base+0(FP), R3 - SUB R3, R8, R8 - MOVD R8, d+48(FP) - RET diff --git a/vendor/github.com/golang/snappy/encode_asm.go b/vendor/github.com/golang/snappy/encode_asm.go deleted file mode 100644 index 107c1e714..000000000 --- a/vendor/github.com/golang/snappy/encode_asm.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm -// +build amd64 arm64 - -package snappy - -// emitLiteral has the same semantics as in encode_other.go. -// -//go:noescape -func emitLiteral(dst, lit []byte) int - -// emitCopy has the same semantics as in encode_other.go. -// -//go:noescape -func emitCopy(dst []byte, offset, length int) int - -// extendMatch has the same semantics as in encode_other.go. -// -//go:noescape -func extendMatch(src []byte, i, j int) int - -// encodeBlock has the same semantics as in encode_other.go. 
-// -//go:noescape -func encodeBlock(dst, src []byte) (d int) diff --git a/vendor/github.com/golang/snappy/go.mod b/vendor/github.com/golang/snappy/go.mod deleted file mode 100644 index f6406bb2c..000000000 --- a/vendor/github.com/golang/snappy/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/golang/snappy diff --git a/vendor/github.com/imdario/mergo/.travis.yml b/vendor/github.com/imdario/mergo/.travis.yml index dad29725f..d324c43ba 100644 --- a/vendor/github.com/imdario/mergo/.travis.yml +++ b/vendor/github.com/imdario/mergo/.travis.yml @@ -1,4 +1,7 @@ language: go +arch: + - amd64 + - ppc64le install: - go get -t - go get golang.org/x/tools/cmd/cover diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md index 876abb500..aa8cbd7ce 100644 --- a/vendor/github.com/imdario/mergo/README.md +++ b/vendor/github.com/imdario/mergo/README.md @@ -97,7 +97,7 @@ If Mergo is useful to you, consider buying me a coffee, a beer, or making a mont - [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) - [jnuthong/item_search](https://github.com/jnuthong/item_search) - [bukalapak/snowboard](https://github.com/bukalapak/snowboard) -- [janoszen/containerssh](https://github.com/janoszen/containerssh) +- [containerssh/containerssh](https://github.com/containerssh/containerssh) ## Install diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go index afa84a1e2..8c2a8fcd9 100644 --- a/vendor/github.com/imdario/mergo/merge.go +++ b/vendor/github.com/imdario/mergo/merge.go @@ -95,13 +95,18 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } } } else { - if (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) { + if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) { dst.Set(src) } } case reflect.Map: if dst.IsNil() && !src.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) + if dst.CanSet() { + dst.Set(reflect.MakeMap(dst.Type())) + } else { + dst = src + return + } } if src.Kind() != reflect.Map { diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md index 52b111d5f..c589addf9 100644 --- a/vendor/github.com/json-iterator/go/README.md +++ b/vendor/github.com/json-iterator/go/README.md @@ -8,8 +8,6 @@ A high-performance 100% compatible drop-in replacement of "encoding/json" -You can also use thrift like JSON using [thrift-iterator](https://github.com/thrift-iterator/go) - # Benchmark ![benchmark](http://jsoniter.com/benchmarks/go-benchmark.png) diff --git a/vendor/github.com/json-iterator/go/go.mod b/vendor/github.com/json-iterator/go/go.mod index e05c42ff5..e817cccbf 100644 --- a/vendor/github.com/json-iterator/go/go.mod +++ b/vendor/github.com/json-iterator/go/go.mod @@ -6,6 +6,6 @@ require ( github.com/davecgh/go-spew v1.1.1 github.com/google/gofuzz v1.0.0 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 - github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 + github.com/modern-go/reflect2 v1.0.2 github.com/stretchr/testify v1.3.0 ) diff --git a/vendor/github.com/json-iterator/go/go.sum b/vendor/github.com/json-iterator/go/go.sum index be00a6df9..4b7bb8a29 100644 --- a/vendor/github.com/json-iterator/go/go.sum +++ b/vendor/github.com/json-iterator/go/go.sum @@ -5,11 +5,10 @@ github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod 
h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/vendor/github.com/klauspost/compress/.gitattributes b/vendor/github.com/klauspost/compress/.gitattributes new file mode 100644 index 000000000..402433593 --- /dev/null +++ b/vendor/github.com/klauspost/compress/.gitattributes @@ -0,0 +1,2 @@ +* -text +*.bin -text -diff diff --git a/vendor/github.com/willf/bitset/.gitignore b/vendor/github.com/klauspost/compress/.gitignore similarity index 74% rename from vendor/github.com/willf/bitset/.gitignore rename to vendor/github.com/klauspost/compress/.gitignore index 5c204d28b..d31b37815 100644 --- a/vendor/github.com/willf/bitset/.gitignore +++ b/vendor/github.com/klauspost/compress/.gitignore @@ -22,5 +22,11 @@ _testmain.go *.exe *.test *.prof +/s2/cmd/_s2sx/sfx-exe -target +# Linux perf files +perf.data +perf.data.old + +# gdb history +.gdb_history diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml new file mode 100644 index 000000000..a2bf06e94 --- /dev/null +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -0,0 +1,141 @@ +# This is an example goreleaser.yaml file with some sane defaults. 
+# Make sure to check the documentation at http://goreleaser.com +before: + hooks: + - ./gen.sh + - go install mvdan.cc/garble@v0.7.2 + +builds: + - + id: "s2c" + binary: s2c + main: ./s2/cmd/s2c/main.go + flags: + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + gobinary: garble + - + id: "s2d" + binary: s2d + main: ./s2/cmd/s2d/main.go + flags: + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + gobinary: garble + - + id: "s2sx" + binary: s2sx + main: ./s2/cmd/_s2sx/main.go + flags: + - -modfile=s2sx.mod + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + gobinary: garble + +archives: + - + id: s2-binaries + name_template: "s2-{{ .Os }}_{{ .Arch }}_{{ .Version }}" + replacements: + aix: AIX + darwin: OSX + linux: Linux + windows: Windows + 386: i386 + amd64: x86_64 + freebsd: FreeBSD + netbsd: NetBSD + format_overrides: + - goos: windows + format: zip + files: + - unpack/* + - s2/LICENSE + - s2/README.md +checksum: + name_template: 'checksums.txt' +snapshot: + name_template: "{{ .Tag }}-next" +changelog: + sort: asc + filters: + exclude: + - '^doc:' + - '^docs:' + - '^test:' + - '^tests:' + - '^Update\sREADME.md' + +nfpms: + - + file_name_template: "s2_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}" + vendor: Klaus Post + homepage: https://github.com/klauspost/compress + maintainer: Klaus Post + description: S2 Compression Tool + license: BSD 3-Clause + formats: + - deb + - rpm + replacements: + darwin: Darwin + linux: Linux + freebsd: FreeBSD + amd64: x86_64 diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE index 1eb75ef68..87d557477 100644 --- a/vendor/github.com/klauspost/compress/LICENSE +++ b/vendor/github.com/klauspost/compress/LICENSE @@ -26,3 +26,279 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------ + +Files: gzhttp/* + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-2017 The New York Times Company + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +------------------ + +Files: s2/cmd/internal/readahead/* + +The MIT License (MIT) + +Copyright (c) 2015 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--------------------- +Files: snappy/* +Files: internal/snapref/* + +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +----------------- + +Files: s2/cmd/internal/filepathx/* + +Copyright 2016 The filepathx Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md new file mode 100644 index 000000000..d73fb86e4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/README.md @@ -0,0 +1,593 @@ +# compress + +This package provides various compression algorithms. + +* [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression in pure Go. +* [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) is a high performance replacement for Snappy. +* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). +* [snappy](https://github.com/klauspost/compress/tree/master/snappy) is a drop-in replacement for `github.com/golang/snappy` offering better compression and concurrent streams. +* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding. +* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently. +* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation. + +[![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories) +[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml) +[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge) + +# changelog + +* Dec 11, 2022 (v1.15.13) + * zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder https://github.com/klauspost/compress/pull/691 + * zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 https://github.com/klauspost/compress/pull/708 + +* Oct 26, 2022 (v1.15.12) + + * zstd: Tweak decoder allocs. 
https://github.com/klauspost/compress/pull/680
+ * gzhttp: Always delete `HeaderNoCompression` https://github.com/klauspost/compress/pull/683
+
+* Sept 26, 2022 (v1.15.11)
+
+ * flate: Improve level 1-3 compression https://github.com/klauspost/compress/pull/678
+ * zstd: Improve "best" compression by @nightwolfz in https://github.com/klauspost/compress/pull/677
+ * zstd: Fix+reduce decompression allocations https://github.com/klauspost/compress/pull/668
+ * zstd: Fix non-effective noescape tag https://github.com/klauspost/compress/pull/667
+
+* Sept 16, 2022 (v1.15.10)
+
+ * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649
+ * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651
+ * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656
+ * zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657
+ * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658
+ * s2: Improve "better" compression. https://github.com/klauspost/compress/pull/635
+ * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646
+ * Use arrays for constant size copies https://github.com/klauspost/compress/pull/659
+
+* July 21, 2022 (v1.15.9)
+
+ * zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645
+ * zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644
+ * zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643
+
+* July 13, 2022 (v1.15.8)
+
+ * gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641
+ * s2: Add Index header trim/restore https://github.com/klauspost/compress/pull/638
+ * zstd: Optimize seqdeq amd64 asm by @greatroar in https://github.com/klauspost/compress/pull/636
+ * zstd: Improve decoder memcopy https://github.com/klauspost/compress/pull/637
+ * huff0: Pass a single bitReader pointer to asm by @greatroar in https://github.com/klauspost/compress/pull/634
+ * zstd: Branchless getBits for amd64 w/o BMI2 by @greatroar in https://github.com/klauspost/compress/pull/640
+ * gzhttp: Remove header before writing https://github.com/klauspost/compress/pull/639
+
+* June 29, 2022 (v1.15.7)
+
+ * s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633
+ * zip: Merge upstream https://github.com/klauspost/compress/pull/631
+ * zip: Re-add zip64 fix https://github.com/klauspost/compress/pull/624
+ * zstd: translate fseDecoder.buildDtable into asm by @WojciechMula in https://github.com/klauspost/compress/pull/598
+ * flate: Faster histograms https://github.com/klauspost/compress/pull/620
+ * deflate: Use compound hcode https://github.com/klauspost/compress/pull/622
+
+* June 3, 2022 (v1.15.6)
+ * s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613
+ * s2c: Add Snappy/S2 stream recompression https://github.com/klauspost/compress/pull/611
+ * zstd: Always use configured block size https://github.com/klauspost/compress/pull/605
+ * zstd: Fix incorrect hash table placement for dict encoding in default https://github.com/klauspost/compress/pull/606
+ * zstd: Apply default config to ZipDecompressor without options https://github.com/klauspost/compress/pull/608
+ * gzhttp: Exclude more
common archive formats https://github.com/klauspost/compress/pull/612 + * s2: Add ReaderIgnoreCRC https://github.com/klauspost/compress/pull/609 + * s2: Remove sanity load on index creation https://github.com/klauspost/compress/pull/607 + * snappy: Use dedicated function for scoring https://github.com/klauspost/compress/pull/614 + * s2c+s2d: Use official snappy framed extension https://github.com/klauspost/compress/pull/610 + +* May 25, 2022 (v1.15.5) + * s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602 + * s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601 + * huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596 + * zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588 + * zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592 + * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599 + * zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593 + * huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586 + * flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590 + + +* May 11, 2022 (v1.15.4) + * huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577) + * inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581) + * zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583) + * zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580) + +* May 5, 2022 (v1.15.3) + * zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572) + * s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575) + +* Apr 26, 2022 (v1.15.2) + * zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537) + * zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539) + * s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555) + * Minimum version is Go 1.16, added CI test on 1.18. 
+
+* Mar 11, 2022 (v1.15.1)
+ * huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512)
+ * zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514)
+ * zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520)
+ * zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521)
+ * zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523)
+
+* Mar 3, 2022 (v1.15.0)
+ * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498)
+ * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505)
+ * huff0: Prevent single blocks exceeding 16 bits by @klauspost in [#507](https://github.com/klauspost/compress/pull/507)
+ * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509)
+ * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400)
+ * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510)
+
+Both compression and decompression now support "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.
+
+Stream decompression is now faster when running asynchronously, since the goroutine allocation splits the workload much more effectively. Typical streams will use 2 cores fully for decompression. When a stream has finished decoding, no goroutines will be left over, so decoders can now safely be pooled and still be garbage collected.
+
+While the release has been extensively tested, it is recommended to test when upgrading.
+
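+For illustration, a minimal sketch of the synchronous mode described above. The `zstd.WithEncoderConcurrency` and `zstd.WithDecoderConcurrency` option names are assumptions of this example rather than part of the release notes; check the package documentation for the exact API:
+
+```
+	var buf bytes.Buffer
+
+	// Concurrency 1 keeps the encoder synchronous: no goroutines are spawned.
+	enc, err := zstd.NewWriter(&buf, zstd.WithEncoderConcurrency(1))
+	if err != nil {
+		return err
+	}
+	if _, err := enc.Write([]byte("hello zstd")); err != nil {
+		return err
+	}
+	if err := enc.Close(); err != nil {
+		return err
+	}
+
+	// The same applies to decompression.
+	dec, err := zstd.NewReader(&buf, zstd.WithDecoderConcurrency(1))
+	if err != nil {
+		return err
+	}
+	defer dec.Close()
+	if _, err := io.Copy(os.Stdout, dec); err != nil {
+		return err
+	}
+```
+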
+ See changes to v1.14.x
+
+* Feb 22, 2022 (v1.14.4)
+ * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503)
+ * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502)
+ * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501)
+ * huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500)
+
+* Feb 17, 2022 (v1.14.3)
+ * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478)
+ * flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483)
+ * s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486)
+
+* Jan 25, 2022 (v1.14.2)
+ * zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476)
+ * zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469)
+ * zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470)
+ * zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472)
+ * flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473)
+ * zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475)
+
+* Jan 11, 2022 (v1.14.1)
+ * s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462)
+ * flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458)
+ * zstd: Performance improvement in [#420](https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468)
+ * zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464)
+ * Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445)
+
+ +
+ See changes to v1.13.x + +* Aug 30, 2021 (v1.13.5) + * gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425) + * s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413) + * zstd: pooledZipWriter should return Writers to the same pool [#426](https://github.com/klauspost/compress/pull/426) + * Removed golang/snappy as external dependency for tests [#421](https://github.com/klauspost/compress/pull/421) + +* Aug 12, 2021 (v1.13.4) + * Add [snappy replacement package](https://github.com/klauspost/compress/tree/master/snappy). + * zstd: Fix incorrect encoding in "best" mode [#415](https://github.com/klauspost/compress/pull/415) + +* Aug 3, 2021 (v1.13.3) + * zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404) + * zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411) + * gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. [#406](https://github.com/klauspost/compress/pull/406) + * s2sx: Fix max size error [#399](https://github.com/klauspost/compress/pull/399) + * zstd: Add optional stream content size on reset [#401](https://github.com/klauspost/compress/pull/401) + * zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410) + +* Jun 14, 2021 (v1.13.1) + * s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396) + * zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394) + * gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389) + * s2: Improve speed with bigger output margin [#395](https://github.com/klauspost/compress/pull/395) + +* Jun 3, 2021 (v1.13.0) + * Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors. + * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382) + * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380) +
+ + +
+ See changes to v1.12.x
+
+* May 25, 2021 (v1.12.3)
+ * deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374)
+ * deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375)
+ * zstd: Forward read errors [#373](https://github.com/klauspost/compress/pull/373)
+
+* Apr 27, 2021 (v1.12.2)
+ * zstd: Improve better/best compression [#360](https://github.com/klauspost/compress/pull/360) [#364](https://github.com/klauspost/compress/pull/364) [#365](https://github.com/klauspost/compress/pull/365)
+ * zstd: Add helpers to compress/decompress zstd inside zip files [#363](https://github.com/klauspost/compress/pull/363)
+ * deflate: Improve level 5+6 compression [#367](https://github.com/klauspost/compress/pull/367)
+ * s2: Improve better/best compression [#358](https://github.com/klauspost/compress/pull/358) [#359](https://github.com/klauspost/compress/pull/359)
+ * s2: Load after checking src limit on amd64. [#362](https://github.com/klauspost/compress/pull/362)
+ * s2sx: Limit max executable size [#368](https://github.com/klauspost/compress/pull/368)
+
+* Apr 14, 2021 (v1.12.1)
+ * snappy package removed. Upstream added as dependency.
+ * s2: Better compression in "best" mode [#353](https://github.com/klauspost/compress/pull/353)
+ * s2sx: Add stdin input and detect pre-compressed from signature [#352](https://github.com/klauspost/compress/pull/352)
+ * s2c/s2d: Add http as possible input [#348](https://github.com/klauspost/compress/pull/348)
+ * s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352)
+ * zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346)
+ * s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349)
+
+ +
+ See changes to v1.11.x
+
+* Mar 26, 2021 (v1.11.13)
+ * zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345)
+ * zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336)
+ * deflate: Improve entropy compression [#338](https://github.com/klauspost/compress/pull/338)
+ * s2: Clean up and minor performance improvement in best [#341](https://github.com/klauspost/compress/pull/341)
+
+* Mar 5, 2021 (v1.11.12)
+ * s2: Add `s2sx` binary that creates [self-extracting archives](https://github.com/klauspost/compress/tree/master/s2#s2sx-self-extracting-archives).
+ * s2: Speed up decompression on non-assembly platforms [#328](https://github.com/klauspost/compress/pull/328)
+
+* Mar 1, 2021 (v1.11.9)
+ * s2: Add ARM64 decompression assembly. Around 2x output speed. [#324](https://github.com/klauspost/compress/pull/324)
+ * s2: Improve "better" speed and efficiency. [#325](https://github.com/klauspost/compress/pull/325)
+ * s2: Fix binaries.
+
+* Feb 25, 2021 (v1.11.8)
+ * s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended.
+ * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315)
+ * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322)
+ * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314)
+ * zip: Fix zip64 headers. [#313](https://github.com/klauspost/compress/pull/313)
+
+* Jan 14, 2021 (v1.11.7)
+ * Use Bytes() interface to get bytes across packages. [#309](https://github.com/klauspost/compress/pull/309)
+ * s2: Add 'best' compression option. [#310](https://github.com/klauspost/compress/pull/310)
+ * s2: Add ReaderMaxBlockSize, changes `s2.NewReader` signature to include varargs. [#311](https://github.com/klauspost/compress/pull/311)
+ * s2: Fix crash on small better buffers. [#308](https://github.com/klauspost/compress/pull/308)
+ * s2: Clean up decoder. [#312](https://github.com/klauspost/compress/pull/312)
+
+* Jan 7, 2021 (v1.11.6)
+ * zstd: Make decoder allocations smaller [#306](https://github.com/klauspost/compress/pull/306)
+ * zstd: Free Decoder resources when Reset is called with a nil io.Reader [#305](https://github.com/klauspost/compress/pull/305)
+
+* Dec 20, 2020 (v1.11.4)
+ * zstd: Add Best compression mode [#304](https://github.com/klauspost/compress/pull/304)
+ * Add header decoder [#299](https://github.com/klauspost/compress/pull/299)
+ * s2: Add uncompressed stream option [#297](https://github.com/klauspost/compress/pull/297)
+ * Simplify/speed up small blocks with known max size.
[#300](https://github.com/klauspost/compress/pull/300) + * zstd: Always reset literal dict encoder [#303](https://github.com/klauspost/compress/pull/303) + +* Nov 15, 2020 (v1.11.3) + * inflate: 10-15% faster decompression [#293](https://github.com/klauspost/compress/pull/293) + * zstd: Tweak DecodeAll default allocation [#295](https://github.com/klauspost/compress/pull/295) + +* Oct 11, 2020 (v1.11.2) + * s2: Fix out of bounds read in "better" block compression [#291](https://github.com/klauspost/compress/pull/291) + +* Oct 1, 2020 (v1.11.1) + * zstd: Set allLitEntropy true in default configuration [#286](https://github.com/klauspost/compress/pull/286) + +* Sept 8, 2020 (v1.11.0) + * zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281) + * zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282) + * inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274) +
+ +
+ See changes to v1.10.x + +* July 8, 2020 (v1.10.11) + * zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278) + * huff0: Also populate compression table when reading decoding table. [#275](https://github.com/klauspost/compress/pull/275) + +* June 23, 2020 (v1.10.10) + * zstd: Skip entropy compression in fastest mode when no matches. [#270](https://github.com/klauspost/compress/pull/270) + +* June 16, 2020 (v1.10.9): + * zstd: API change for specifying dictionaries. See [#268](https://github.com/klauspost/compress/pull/268) + * zip: update CreateHeaderRaw to handle zip64 fields. [#266](https://github.com/klauspost/compress/pull/266) + * Fuzzit tests removed. The service has been purchased and is no longer available. + +* June 5, 2020 (v1.10.8): + * 1.15x faster zstd block decompression. [#265](https://github.com/klauspost/compress/pull/265) + +* June 1, 2020 (v1.10.7): + * Added zstd decompression [dictionary support](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) + * Increase zstd decompression speed up to 1.19x. [#259](https://github.com/klauspost/compress/pull/259) + * Remove internal reset call in zstd compression and reduce allocations. [#263](https://github.com/klauspost/compress/pull/263) + +* May 21, 2020: (v1.10.6) + * zstd: Reduce allocations while decoding. [#258](https://github.com/klauspost/compress/pull/258), [#252](https://github.com/klauspost/compress/pull/252) + * zstd: Stricter decompression checks. + +* April 12, 2020: (v1.10.5) + * s2-commands: Flush output when receiving SIGINT. [#239](https://github.com/klauspost/compress/pull/239) + +* Apr 8, 2020: (v1.10.4) + * zstd: Minor/special case optimizations. [#251](https://github.com/klauspost/compress/pull/251), [#250](https://github.com/klauspost/compress/pull/250), [#249](https://github.com/klauspost/compress/pull/249), [#247](https://github.com/klauspost/compress/pull/247) +* Mar 11, 2020: (v1.10.3) + * s2: Use S2 encoder in pure Go mode for Snappy output as well. [#245](https://github.com/klauspost/compress/pull/245) + * s2: Fix pure Go block encoder. [#244](https://github.com/klauspost/compress/pull/244) + * zstd: Added "better compression" mode. [#240](https://github.com/klauspost/compress/pull/240) + * zstd: Improve speed of fastest compression mode by 5-10% [#241](https://github.com/klauspost/compress/pull/241) + * zstd: Skip creating encoders when not needed. [#238](https://github.com/klauspost/compress/pull/238) + +* Feb 27, 2020: (v1.10.2) + * Close to 50% speedup in inflate (gzip/zip decompression). [#236](https://github.com/klauspost/compress/pull/236) [#234](https://github.com/klauspost/compress/pull/234) [#232](https://github.com/klauspost/compress/pull/232) + * Reduce deflate level 1-6 memory usage up to 59%. [#227](https://github.com/klauspost/compress/pull/227) + +* Feb 18, 2020: (v1.10.1) + * Fix zstd crash when resetting multiple times without sending data. [#226](https://github.com/klauspost/compress/pull/226) + * deflate: Fix dictionary use on level 1-6. [#224](https://github.com/klauspost/compress/pull/224) + * Remove deflate writer reference when closing. [#224](https://github.com/klauspost/compress/pull/224) + +* Feb 4, 2020: (v1.10.0) + * Add optional dictionary to [stateless deflate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc#StatelessDeflate). Breaking change, send `nil` for previous behaviour. 
[#216](https://github.com/klauspost/compress/pull/216) + * Fix buffer overflow on repeated small block deflate. [#218](https://github.com/klauspost/compress/pull/218) + * Allow copying content from an existing ZIP file without decompressing+compressing. [#214](https://github.com/klauspost/compress/pull/214) + * Added [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) AMD64 assembler and various optimizations. Stream speed >10GB/s. [#186](https://github.com/klauspost/compress/pull/186) + +
+ +
+ See changes prior to v1.10.0
+
+* Jan 20, 2020 (v1.9.8) Optimize gzip/deflate with better size estimates and faster table generation. [#207](https://github.com/klauspost/compress/pull/207) by [luyu6056](https://github.com/luyu6056), [#206](https://github.com/klauspost/compress/pull/206).
+* Jan 11, 2020: S2 Encode/Decode will use provided buffer if capacity is big enough. [#204](https://github.com/klauspost/compress/pull/204)
+* Jan 5, 2020: (v1.9.7) Fix another zstd regression in v1.9.5 - v1.9.6 removed.
+* Jan 4, 2020: (v1.9.6) Regression in v1.9.5 fixed causing corrupt zstd encodes in rare cases.
+* Jan 4, 2020: Faster IO in [s2c + s2d commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) compression/decompression. [#192](https://github.com/klauspost/compress/pull/192)
+* Dec 29, 2019: Removed v1.9.5 since fuzz tests showed a compatibility problem with the reference zstandard decoder.
+* Dec 29, 2019: (v1.9.5) zstd: 10-20% faster block compression. [#199](https://github.com/klauspost/compress/pull/199)
+* Dec 29, 2019: [zip](https://godoc.org/github.com/klauspost/compress/zip) package updated with latest Go features
+* Dec 29, 2019: zstd: Single segment flag conditions tweaked. [#197](https://github.com/klauspost/compress/pull/197)
+* Dec 18, 2019: s2: Faster compression when ReadFrom is used. [#198](https://github.com/klauspost/compress/pull/198)
+* Dec 10, 2019: s2: Fix repeat length output when just above the 16MB limit.
+* Dec 10, 2019: zstd: Add function to get decoder as io.ReadCloser. [#191](https://github.com/klauspost/compress/pull/191)
+* Dec 3, 2019: (v1.9.4) S2: limit max repeat length. [#188](https://github.com/klauspost/compress/pull/188)
+* Dec 3, 2019: Add [WithNoEntropyCompression](https://godoc.org/github.com/klauspost/compress/zstd#WithNoEntropyCompression) to zstd [#187](https://github.com/klauspost/compress/pull/187)
+* Dec 3, 2019: Reduce memory use for tests. Check for leaked goroutines.
+* Nov 28, 2019 (v1.9.3) Less allocations in stateless deflate.
+* Nov 28, 2019: 5-20% Faster huff0 decode. Impacts zstd as well. [#184](https://github.com/klauspost/compress/pull/184)
+* Nov 12, 2019 (v1.9.2) Added [Stateless Compression](#stateless-compression) for gzip/deflate.
+* Nov 12, 2019: Fixed zstd decompression of large single blocks. [#180](https://github.com/klauspost/compress/pull/180)
+* Nov 11, 2019: Set default [s2c](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) block size to 4MB.
+* Nov 11, 2019: Reduce inflate memory use by 1KB.
+* Nov 10, 2019: Less allocations in deflate bit writer.
+* Nov 10, 2019: Fix inconsistent error returned by zstd decoder.
+* Oct 28, 2019 (v1.9.1) zstd: Fix crash when compressing blocks. [#174](https://github.com/klauspost/compress/pull/174)
+* Oct 24, 2019 (v1.9.0) zstd: Fix rare data corruption [#173](https://github.com/klauspost/compress/pull/173)
+* Oct 24, 2019 zstd: Fix huff0 out of buffer write [#171](https://github.com/klauspost/compress/pull/171) and always return errors [#172](https://github.com/klauspost/compress/pull/172)
+* Oct 10, 2019: Big deflate rewrite, 30-40% faster with better compression [#105](https://github.com/klauspost/compress/pull/105)
+
+ +
+ See changes prior to v1.9.0
+
+* Oct 10, 2019: (v1.8.6) zstd: Allow partial reads to get flushed data. [#169](https://github.com/klauspost/compress/pull/169)
+* Oct 3, 2019: Fix inconsistent results on broken zstd streams.
+* Sep 25, 2019: Added `-rm` (remove source files) and `-q` (no output except errors) to `s2c` and `s2d` [commands](https://github.com/klauspost/compress/tree/master/s2#commandline-tools)
+* Sep 16, 2019: (v1.8.4) Add `s2c` and `s2d` [commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools).
+* Sep 10, 2019: (v1.8.3) Fix s2 decoder [Skip](https://godoc.org/github.com/klauspost/compress/s2#Reader.Skip).
+* Sep 7, 2019: zstd: Added [WithWindowSize](https://godoc.org/github.com/klauspost/compress/zstd#WithWindowSize), contributed by [ianwilkes](https://github.com/ianwilkes).
+* Sep 5, 2019: (v1.8.2) Add [WithZeroFrames](https://godoc.org/github.com/klauspost/compress/zstd#WithZeroFrames) which adds full zero payload block encoding option.
+* Sep 5, 2019: Lazy initialization of zstandard predefined en/decoder tables.
+* Aug 26, 2019: (v1.8.1) S2: 1-2% compression increase in "better" compression mode.
+* Aug 26, 2019: zstd: Check maximum size of Huffman 1X compressed literals while decoding.
+* Aug 24, 2019: (v1.8.0) Added [S2 compression](https://github.com/klauspost/compress/tree/master/s2#s2-compression), a high performance replacement for Snappy.
+* Aug 21, 2019: (v1.7.6) Fixed minor issues found by fuzzer. One could lead to zstd not decompressing.
+* Aug 18, 2019: Add [fuzzit](https://fuzzit.dev/) continuous fuzzing.
+* Aug 14, 2019: zstd: Skip incompressible data 2x faster. [#147](https://github.com/klauspost/compress/pull/147)
+* Aug 4, 2019 (v1.7.5): Better literal compression. [#146](https://github.com/klauspost/compress/pull/146)
+* Aug 4, 2019: Faster zstd compression. [#143](https://github.com/klauspost/compress/pull/143) [#144](https://github.com/klauspost/compress/pull/144)
+* Aug 4, 2019: Faster zstd decompression. [#145](https://github.com/klauspost/compress/pull/145) [#143](https://github.com/klauspost/compress/pull/143) [#142](https://github.com/klauspost/compress/pull/142)
+* July 15, 2019 (v1.7.4): Fix double EOF block in rare cases on zstd encoder.
+* July 15, 2019 (v1.7.3): Minor speedup/compression increase in default zstd encoder.
+* July 14, 2019: zstd decoder: Fix decompression error on multiple uses with mixed content.
+* July 7, 2019 (v1.7.2): Snappy update, zstd decoder potential race fix.
+* June 17, 2019: zstd decompression bugfix.
+* June 17, 2019: Fix 32 bit builds.
+* June 17, 2019: Easier use in modules (fewer dependencies).
+* June 9, 2019: New stronger "default" [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression mode. Matches zstd default compression ratio.
+* June 5, 2019: 20-40% more throughput in [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and better compression.
+* June 5, 2019: deflate/gzip compression: Reduce memory usage of lower compression levels.
+* June 2, 2019: Added [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression!
+* May 25, 2019: deflate/gzip: 10% faster bit writer, mostly visible in lower levels.
+* Apr 22, 2019: [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) decompression added.
+* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression).
+* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below.
+* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0).
+* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change.
+* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change.
+* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function.
+* May 28, 2017: Reduce allocations when resetting decoder.
+* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7.
+* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625).
+* Oct 25, 2016: Level 2-4 have been rewritten and now offer significantly better performance than before.
+* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update.
+* Oct 16, 2016: Go 1.7 changes merged. Apples to apples this package is a few percent faster, but has a significantly better balance between speed and compression per level.
+* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression.
+* Mar 24, 2016: Small speedup for level 1-3.
+* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster.
+* Feb 19, 2016: Handle small payloads faster in level 1-3.
+* Feb 19, 2016: Added faster level 2 + 3 compression modes.
+* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5.
+* Feb 14, 2016: Snappy: Merge upstream changes.
+* Feb 14, 2016: Snappy: Fix aggressive skipping.
+* Feb 14, 2016: Snappy: Update benchmark.
+* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression.
+* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy-to-compress material run faster. Typical speedup is around 25%.
+* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard to compress content.
+* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup.
+* Jan 16, 2016: Optimization on deflate level 1,2,3 compression.
+* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives.
+* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs.
+* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms.
+* Dec 8 2015: Fixed rare [one byte out-of-bounds read](https://github.com/klauspost/compress/issues/20). Please update!
+* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet).
+* Nov 20 2015: Small optimization to bit writer on 64 bit systems.
+* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15).
+* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate.
+* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file +* Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x. + +
+
+# deflate usage
+
+The packages are drop-in replacements for standard libraries. Simply replace the import path to use them:
+
+| old import | new import | Documentation
+|--------------------|-----------------------------------------|--------------------|
+| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc)
+| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc)
+| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc)
+| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc)
+
+* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a drop-in replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib).
+
+You may also be interested in [pgzip](https://github.com/klauspost/pgzip), a drop-in replacement for gzip that supports multithreaded compression on big files, and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages.
+
+The packages contain the same functionality as the standard library, so you can use the godoc for that: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/).
+
+Currently there is only a minor speedup on decompression (mostly CRC32 calculation).
+
+Memory usage is typically 1MB for a Writer; the stdlib is in the same range.
+If you expect to have a lot of concurrently allocated Writers, consider using
+the stateless compression described below.
+
+For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing).
+
+# Stateless compression
+
+This package offers stateless compression as a special option for gzip/deflate.
+It will do compression but without maintaining any state between Write calls.
+
+This means there will be no memory kept between Write calls, but compression and speed will be suboptimal.
+
+This is only relevant in cases where you expect to run many thousands of compressors concurrently,
+but with very little activity. This is *not* intended for regular web servers serving individual requests.
+
+Because of this, the size of actual Write calls will affect output size.
+
+In gzip, specify level `-3` / `gzip.StatelessCompression` to enable.
+
+For direct deflate use, NewStatelessWriter and StatelessDeflate are available. See [documentation](https://godoc.org/github.com/klauspost/compress/flate#NewStatelessWriter)
+
+A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer:
+
+```
+	// replace 'ioutil.Discard' with your output.
+	gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression)
+	if err != nil {
+		return err
+	}
+	defer gzw.Close()
+
+	w := bufio.NewWriterSize(gzw, 4096)
+	defer w.Flush()
+
+	// Write to 'w'
+```
+
+This will only use up to 4KB in memory when the writer is idle.
+
+Compression is almost always worse than the fastest compression level
+and each write will allocate (a little) memory.
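+
+For the direct deflate use mentioned above, a minimal sketch follows. The `flate.NewStatelessWriter` and `flate.StatelessDeflate` signatures are assumptions based on the linked documentation (the final `nil` is the optional dictionary):
+
+```
+	var buf bytes.Buffer
+
+	// NewStatelessWriter keeps no state between Write calls. (assumed API)
+	w := flate.NewStatelessWriter(&buf)
+	if _, err := w.Write([]byte("hello deflate")); err != nil {
+		return err
+	}
+	if err := w.Close(); err != nil {
+		return err
+	}
+
+	// StatelessDeflate compresses a single block directly, here marked final. (assumed API)
+	if err := flate.StatelessDeflate(&buf, []byte("more data"), true, nil); err != nil {
+		return err
+	}
+```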
+
+# Performance Update 2018
+
+It has been a while since we have been looking at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD.
+
+The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates I could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet.
+
+The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* - relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller is the compressed output compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input.
+
+The `gzstd` (standard library gzip) and `gzkp` (this package gzip) only use one CPU core. [`pgzip`](https://github.com/klauspost/pgzip) and [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) use all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet).
+
+
+## Overall differences.
+
+There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels.
+
+The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted my library to give a smoother transition between the compression levels than the standard library.
+
+This package attempts to provide a smoother transition, where "1" is taking a lot of shortcuts, "5" is the reasonable trade-off and "9" is the "give me the best compression", and the values in between give a reasonable middle ground. The standard library has big differences in levels 1-4, but levels 5-9 have no significant gains - often spending a lot more time than can be justified by the achieved compression.
+
+There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab.
+
+## Web Content
+
+This test set aims to emulate typical use in a web server. The test-set is 4GB data in 53k files, and is a mixture of (mostly) HTML, JS, CSS.
+
+Since levels 1 and 9 are close to being the same code, their results are quite close. But looking at the levels in between, the differences are quite big.
+
+Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case.
+
+## Object files
+
+This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible.
+
+The picture is similar to the web content, but with small differences since this is very compressible.
Levels 2-3 offer good speed, but sacrifice quite a bit of compression.
+
+The standard library seems suboptimal on levels 3 and 4 - offering both worse compression and speed than levels 6 & 7 of this package respectively.
+
+## Highly Compressible File
+
+This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real-life terms we are dealing with something like a highly redundant stream of data, etc.
+
+It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5, with levels 7 and 8 offering great speed for the achieved compression.
+
+So if you know your content is extremely compressible you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground".
+
+## Medium-High Compressible
+
+This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text based compression and more data heavy streams.
+
+We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both.
+
+## Medium Compressible
+
+I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario.
+
+The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior.
+
+
+## Un-compressible Content
+
+This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections.
+
+
+## Huffman only compression
+
+This compression library adds a special compression level, named `HuffmanOnly`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reducing the number of bits to represent each character.
+
+This means that often used characters, like 'e' and ' ' (space) in text use the fewest bits to represent, and rare characters like '¤' take more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM).
+
+Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core.
+
+The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%).
+
+The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder slowing down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1).
So in this case you trade a 30% size increase for a 4 times speedup.
+
+For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
+
+This is implemented in Go 1.7 as "Huffman Only" mode, though not exposed for gzip.
+
+# Other packages
+
+Here are other packages of good quality and pure Go (no cgo wrappers or autoconverted code):
+
+* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression.
+* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression.
+* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer.
+
+# license
+
+This code is licensed under the same conditions as the original Go code. See LICENSE file.
diff --git a/vendor/github.com/klauspost/compress/compressible.go b/vendor/github.com/klauspost/compress/compressible.go
new file mode 100644
index 000000000..ea5a692d5
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/compressible.go
@@ -0,0 +1,85 @@
+package compress
+
+import "math"
+
+// Estimate returns a normalized compressibility estimate of block b.
+// Values close to zero are likely uncompressible.
+// Values above 0.1 are likely to be compressible.
+// Values above 0.5 are very compressible.
+// Very small lengths will return 0.
+func Estimate(b []byte) float64 {
+	if len(b) < 16 {
+		return 0
+	}
+
+	// Correctly predicted order 1
+	hits := 0
+	lastMatch := false
+	var o1 [256]byte
+	var hist [256]int
+	c1 := byte(0)
+	for _, c := range b {
+		if c == o1[c1] {
+			// We only count a hit if there were two correct predictions in a row.
+			if lastMatch {
+				hits++
+			}
+			lastMatch = true
+		} else {
+			lastMatch = false
+		}
+		o1[c1] = c
+		c1 = c
+		hist[c]++
+	}
+
+	// Use x^0.6 to give better spread
+	prediction := math.Pow(float64(hits)/float64(len(b)), 0.6)
+
+	// Calculate histogram distribution
+	variance := float64(0)
+	avg := float64(len(b)) / 256
+
+	for _, v := range hist {
+		Δ := float64(v) - avg
+		variance += Δ * Δ
+	}
+
+	stddev := math.Sqrt(float64(variance)) / float64(len(b))
+	exp := math.Sqrt(1 / float64(len(b)))
+
+	// Subtract expected stddev
+	stddev -= exp
+	if stddev < 0 {
+		stddev = 0
+	}
+	stddev *= 1 + exp
+
+	// Use x^0.4 to give better spread
+	entropy := math.Pow(stddev, 0.4)
+
+	// 50/50 weight between prediction and histogram distribution
+	return math.Pow((prediction+entropy)/2, 0.9)
+}
+
+// ShannonEntropyBits returns the minimum number of bits required to represent
+// an entropy encoding of the input bytes.
+// https://en.wiktionary.org/wiki/Shannon_entropy +func ShannonEntropyBits(b []byte) int { + if len(b) == 0 { + return 0 + } + var hist [256]int + for _, c := range b { + hist[c]++ + } + shannon := float64(0) + invTotal := 1.0 / float64(len(b)) + for _, v := range hist[:] { + if v > 0 { + n := float64(v) + shannon += math.Ceil(-math.Log2(n*invTotal) * n) + } + } + return int(math.Ceil(shannon)) +} diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go index 5283ac5a5..07265dded 100644 --- a/vendor/github.com/klauspost/compress/flate/deflate.go +++ b/vendor/github.com/klauspost/compress/flate/deflate.go @@ -6,6 +6,7 @@ package flate import ( + "encoding/binary" "fmt" "io" "math" @@ -37,15 +38,17 @@ const ( maxMatchLength = 258 // The longest match for the compressor minOffsetSize = 1 // The shortest offset that makes any sense - // The maximum number of tokens we put into a single flat block, just too - // stop things from getting too large. - maxFlateBlockTokens = 1 << 14 + // The maximum number of tokens we will encode at the time. + // Smaller sizes usually creates less optimal blocks. + // Bigger can make context switching slow. + // We use this for levels 7-9, so we make it big. + maxFlateBlockTokens = 1 << 15 maxStoreBlockSize = 65535 hashBits = 17 // After 17 performance degrades hashSize = 1 << hashBits hashMask = (1 << hashBits) - 1 hashShift = (hashBits + minMatchLength - 1) / minMatchLength - maxHashOffset = 1 << 24 + maxHashOffset = 1 << 28 skipNever = math.MaxInt32 @@ -70,9 +73,9 @@ var levels = []compressionLevel{ {0, 0, 0, 0, 0, 6}, // Levels 7-9 use increasingly more lazy matching // and increasingly stringent conditions for "good enough". - {8, 8, 24, 16, skipNever, 7}, - {10, 16, 24, 64, skipNever, 8}, - {32, 258, 258, 4096, skipNever, 9}, + {8, 12, 16, 24, skipNever, 7}, + {16, 30, 40, 64, skipNever, 8}, + {32, 258, 258, 1024, skipNever, 9}, } // advancedState contains state for the advanced levels, with bigger hash tables, etc. @@ -81,28 +84,29 @@ type advancedState struct { length int offset int maxInsertIndex int + chainHead int + hashOffset int + + ii uint16 // position of last match, intended to overflow to reset. + + // input window: unprocessed data is window[index:windowEnd] + index int + estBitsPerByte int + hashMatch [maxMatchLength + minMatchLength]uint32 // Input hash chains // hashHead[hashValue] contains the largest inputIndex with the specified hash value // If hashHead[hashValue] is within the current window, then // hashPrev[hashHead[hashValue] & windowMask] contains the previous index // with the same hash value. - chainHead int - hashHead [hashSize]uint32 - hashPrev [windowSize]uint32 - hashOffset int - - // input window: unprocessed data is window[index:windowEnd] - index int - hashMatch [maxMatchLength + minMatchLength]uint32 - - hash uint32 - ii uint16 // position of last match, intended to overflow to reset. 
+ hashHead [hashSize]uint32 + hashPrev [windowSize]uint32 } type compressor struct { compressionLevel + h *huffmanEncoder w *huffmanBitWriter // compression algorithm @@ -127,7 +131,8 @@ func (d *compressor) fillDeflate(b []byte) int { s := d.state if s.index >= 2*windowSize-(minMatchLength+maxMatchLength) { // shift the window by windowSize - copy(d.window[:], d.window[windowSize:2*windowSize]) + //copy(d.window[:], d.window[windowSize:2*windowSize]) + *(*[windowSize]byte)(d.window) = *(*[windowSize]byte)(d.window[windowSize:]) s.index -= windowSize d.windowEnd -= windowSize if d.blockStart >= windowSize { @@ -170,7 +175,8 @@ func (d *compressor) writeBlock(tok *tokens, index int, eof bool) error { window = d.window[d.blockStart:index] } d.blockStart = index - d.w.writeBlock(tok, eof, window) + //d.w.writeBlock(tok, eof, window) + d.w.writeBlockDynamic(tok, eof, window, d.sync) return d.w.err } return nil @@ -253,7 +259,6 @@ func (d *compressor) fillWindow(b []byte) { // Set the head of the hash chain to us. s.hashHead[newH] = uint32(di + s.hashOffset) } - s.hash = newH } // Update window information. d.windowEnd += n @@ -263,7 +268,7 @@ func (d *compressor) fillWindow(b []byte) { // Try to find a match starting at index whose length is greater than prevSize. // We only look at chainCount possibilities before giving up. // pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead -func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) { +func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, offset int, ok bool) { minMatchLook := maxMatchLength if lookahead < minMatchLook { minMatchLook = lookahead @@ -279,36 +284,75 @@ func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead // If we've got a match that's good enough, only look in 1/4 the chain. tries := d.chain - length = prevLength - if length >= d.good { - tries >>= 2 - } + length = minMatchLength - 1 wEnd := win[pos+length] wPos := win[pos:] minIndex := pos - windowSize + if minIndex < 0 { + minIndex = 0 + } + offset = 0 + + cGain := 0 + if d.chain < 100 { + for i := prevHead; tries > 0; tries-- { + if wEnd == win[i+length] { + n := matchLen(win[i:i+minMatchLook], wPos) + if n > length { + length = n + offset = pos - i + ok = true + if n >= nice { + // The match is good enough that we don't try to find a better one. + break + } + wEnd = win[pos+n] + } + } + if i <= minIndex { + // hashPrev[i & windowMask] has already been overwritten, so stop now. + break + } + i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset + if i < minIndex { + break + } + } + return + } + // Some like it higher (CSV), some like it lower (JSON) + const baseCost = 6 + // Base is 4 bytes at with an additional cost. + // Matches must be better than this. for i := prevHead; tries > 0; tries-- { if wEnd == win[i+length] { n := matchLen(win[i:i+minMatchLook], wPos) - - if n > length && (n > minMatchLength || pos-i <= 4096) { - length = n - offset = pos - i - ok = true - if n >= nice { - // The match is good enough that we don't try to find a better one. - break + if n > length { + // Calculate gain. 
Estimate + newGain := d.h.bitLengthRaw(wPos[:n]) - int(offsetExtraBits[offsetCode(uint32(pos-i))]) - baseCost - int(lengthExtraBits[lengthCodes[(n-3)&255]]) + + //fmt.Println(n, "gain:", newGain, "prev:", cGain, "raw:", d.h.bitLengthRaw(wPos[:n])) + if newGain > cGain { + length = n + offset = pos - i + cGain = newGain + ok = true + if n >= nice { + // The match is good enough that we don't try to find a better one. + break + } + wEnd = win[pos+n] } - wEnd = win[pos+n] } } - if i == minIndex { + if i <= minIndex { // hashPrev[i & windowMask] has already been overwritten, so stop now. break } i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset - if i < minIndex || i < 0 { + if i < minIndex { break } } @@ -327,8 +371,13 @@ func (d *compressor) writeStoredBlock(buf []byte) error { // of the supplied slice. // The caller must ensure that len(b) >= 4. func hash4(b []byte) uint32 { - b = b[:4] - return hash4u(uint32(b[3])|uint32(b[2])<<8|uint32(b[1])<<16|uint32(b[0])<<24, hashBits) + return hash4u(binary.LittleEndian.Uint32(b), hashBits) +} + +// hash4 returns the hash of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <32. +func hash4u(u uint32, h uint8) uint32 { + return (u * prime4bytes) >> (32 - h) } // bulkHash4 will compute hashes using the same @@ -337,11 +386,12 @@ func bulkHash4(b []byte, dst []uint32) { if len(b) < 4 { return } - hb := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 + hb := binary.LittleEndian.Uint32(b) + dst[0] = hash4u(hb, hashBits) end := len(b) - 4 + 1 for i := 1; i < end; i++ { - hb = (hb << 8) | uint32(b[i+3]) + hb = (hb >> 8) | uint32(b[i+3])<<24 dst[i] = hash4u(hb, hashBits) } } @@ -358,7 +408,6 @@ func (d *compressor) initDeflate() { s.hashOffset = 1 s.length = minMatchLength - 1 s.offset = 0 - s.hash = 0 s.chainHead = -1 } @@ -374,11 +423,19 @@ func (d *compressor) deflateLazy() { if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync { return } + if d.windowEnd != s.index && d.chain > 100 { + // Get literal huffman coder. + if d.h == nil { + d.h = newHuffmanEncoder(maxFlateBlockTokens) + } + var tmp [256]uint16 + for _, v := range d.window[s.index:d.windowEnd] { + tmp[v]++ + } + d.h.generate(tmp[:], 15) + } s.maxInsertIndex = d.windowEnd - (minMatchLength - 1) - if s.index < s.maxInsertIndex { - s.hash = hash4(d.window[s.index : s.index+minMatchLength]) - } for { if sanity && s.index > d.windowEnd { @@ -410,11 +467,11 @@ func (d *compressor) deflateLazy() { } if s.index < s.maxInsertIndex { // Update the hash - s.hash = hash4(d.window[s.index : s.index+minMatchLength]) - ch := s.hashHead[s.hash&hashMask] + hash := hash4(d.window[s.index:]) + ch := s.hashHead[hash] s.chainHead = int(ch) s.hashPrev[s.index&windowMask] = ch - s.hashHead[s.hash&hashMask] = uint32(s.index + s.hashOffset) + s.hashHead[hash] = uint32(s.index + s.hashOffset) } prevLength := s.length prevOffset := s.offset @@ -426,12 +483,37 @@ func (d *compressor) deflateLazy() { } if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy { - if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, minMatchLength-1, lookahead); ok { + if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead); ok { s.length = newLength s.offset = newOffset } } + if prevLength >= minMatchLength && s.length <= prevLength { + // Check for better match at end... 
+ // + // checkOff must be >=2 since we otherwise risk checking s.index + // Offset of 2 seems to yield best results. + const checkOff = 2 + prevIndex := s.index - 1 + if prevIndex+prevLength+checkOff < s.maxInsertIndex { + end := lookahead + if lookahead > maxMatchLength { + end = maxMatchLength + } + end += prevIndex + idx := prevIndex + prevLength - (4 - checkOff) + h := hash4(d.window[idx:]) + ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength + (4 - checkOff) + if ch2 > minIndex { + length := matchLen(d.window[prevIndex:end], d.window[ch2:]) + // It seems like a pure length metric is best. + if length > prevLength { + prevLength = length + prevOffset = prevIndex - ch2 + } + } + } // There was a match at the previous step, and the current match is // not better. Output the previous match. d.tokens.AddMatch(uint32(prevLength-3), uint32(prevOffset-minOffsetSize)) @@ -466,7 +548,6 @@ func (d *compressor) deflateLazy() { // Set the head of the hash chain to us. s.hashHead[newH] = uint32(di + s.hashOffset) } - s.hash = newH } s.index = newIndex @@ -479,6 +560,7 @@ func (d *compressor) deflateLazy() { } d.tokens.Reset() } + s.ii = 0 } else { // Reset, if we got a match this run. if s.length >= minMatchLength { @@ -498,13 +580,12 @@ func (d *compressor) deflateLazy() { // If we have a long run of no matches, skip additional bytes // Resets when s.ii overflows after 64KB. - if s.ii > 31 { - n := int(s.ii >> 5) + if n := int(s.ii) - d.chain; n > 0 { + n = 1 + int(n>>6) for j := 0; j < n; j++ { if s.index >= d.windowEnd-1 { break } - d.tokens.AddLiteral(d.window[s.index-1]) if d.tokens.n == maxFlateBlockTokens { if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { @@ -512,6 +593,14 @@ func (d *compressor) deflateLazy() { } d.tokens.Reset() } + // Index... + if s.index < s.maxInsertIndex { + h := hash4(d.window[s.index:]) + ch := s.hashHead[h] + s.chainHead = int(ch) + s.hashPrev[s.index&windowMask] = ch + s.hashHead[h] = uint32(s.index + s.hashOffset) + } s.index++ } // Flush last byte @@ -611,7 +700,9 @@ func (d *compressor) write(b []byte) (n int, err error) { } n = len(b) for len(b) > 0 { - d.step(d) + if d.windowEnd == len(d.window) || d.sync { + d.step(d) + } b = b[d.fill(d, b):] if d.err != nil { return 0, d.err @@ -652,13 +743,13 @@ func (d *compressor) init(w io.Writer, level int) (err error) { level = 5 fallthrough case level >= 1 && level <= 6: - d.w.logNewTablePenalty = 8 + d.w.logNewTablePenalty = 7 d.fast = newFastEnc(level) d.window = make([]byte, maxStoreBlockSize) d.fill = (*compressor).fillBlock d.step = (*compressor).storeFast case 7 <= level && level <= 9: - d.w.logNewTablePenalty = 10 + d.w.logNewTablePenalty = 8 d.state = &advancedState{} d.compressionLevel = levels[level] d.initDeflate() @@ -702,7 +793,6 @@ func (d *compressor) reset(w io.Writer) { d.tokens.Reset() s.length = minMatchLength - 1 s.offset = 0 - s.hash = 0 s.ii = 0 s.maxInsertIndex = 0 } diff --git a/vendor/github.com/klauspost/compress/flate/dict_decoder.go b/vendor/github.com/klauspost/compress/flate/dict_decoder.go index 71c75a065..bb36351a5 100644 --- a/vendor/github.com/klauspost/compress/flate/dict_decoder.go +++ b/vendor/github.com/klauspost/compress/flate/dict_decoder.go @@ -7,19 +7,19 @@ package flate // dictDecoder implements the LZ77 sliding dictionary as used in decompression. // LZ77 decompresses data through sequences of two forms of commands: // -// * Literal insertions: Runs of one or more symbols are inserted into the data -// stream as is. 
This is accomplished through the writeByte method for a -// single symbol, or combinations of writeSlice/writeMark for multiple symbols. -// Any valid stream must start with a literal insertion if no preset dictionary -// is used. +// - Literal insertions: Runs of one or more symbols are inserted into the data +// stream as is. This is accomplished through the writeByte method for a +// single symbol, or combinations of writeSlice/writeMark for multiple symbols. +// Any valid stream must start with a literal insertion if no preset dictionary +// is used. // -// * Backward copies: Runs of one or more symbols are copied from previously -// emitted data. Backward copies come as the tuple (dist, length) where dist -// determines how far back in the stream to copy from and length determines how -// many bytes to copy. Note that it is valid for the length to be greater than -// the distance. Since LZ77 uses forward copies, that situation is used to -// perform a form of run-length encoding on repeated runs of symbols. -// The writeCopy and tryWriteCopy are used to implement this command. +// - Backward copies: Runs of one or more symbols are copied from previously +// emitted data. Backward copies come as the tuple (dist, length) where dist +// determines how far back in the stream to copy from and length determines how +// many bytes to copy. Note that it is valid for the length to be greater than +// the distance. Since LZ77 uses forward copies, that situation is used to +// perform a form of run-length encoding on repeated runs of symbols. +// The writeCopy and tryWriteCopy are used to implement this command. // // For performance reasons, this implementation performs little to no sanity // checks about the arguments. As such, the invariants documented for each diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go index 347ac2c90..24caf5f70 100644 --- a/vendor/github.com/klauspost/compress/flate/fast_encoder.go +++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go @@ -58,17 +58,6 @@ const ( prime8bytes = 0xcf1bbcdcb7a56463 ) -func load32(b []byte, i int) uint32 { - // Help the compiler eliminate bounds checks on the read so it can be done in a single read. - b = b[i:] - b = b[:4] - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -} - -func load64(b []byte, i int) uint64 { - return binary.LittleEndian.Uint64(b[i:]) -} - func load3232(b []byte, i int32) uint32 { return binary.LittleEndian.Uint32(b[i:]) } @@ -77,10 +66,6 @@ func load6432(b []byte, i int32) uint64 { return binary.LittleEndian.Uint64(b[i:]) } -func hash(u uint32) uint32 { - return (u * 0x1e35a7bd) >> tableShift -} - type tableEntry struct { offset int32 } @@ -104,7 +89,8 @@ func (e *fastGen) addBlock(src []byte) int32 { } // Move down offset := int32(len(e.hist)) - maxMatchOffset - copy(e.hist[0:maxMatchOffset], e.hist[offset:]) + // copy(e.hist[0:maxMatchOffset], e.hist[offset:]) + *(*[maxMatchOffset]byte)(e.hist) = *(*[maxMatchOffset]byte)(e.hist[offset:]) e.cur += offset e.hist = e.hist[:maxMatchOffset] } @@ -114,39 +100,36 @@ func (e *fastGen) addBlock(src []byte) int32 { return s } -// hash4 returns the hash of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <32. 
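The addBlock hunk above replaces a copy call with a slice-to-array-pointer assignment. A sketch of that idiom, assuming Go 1.17+; the 64-byte window here is illustrative, where the patch uses maxMatchOffset over e.hist:

package main

import "fmt"

func main() {
	buf := make([]byte, 128)
	for i := range buf {
		buf[i] = byte(i)
	}
	const window = 64
	offset := len(buf) - window
	// Equivalent to copy(buf[:window], buf[offset:]), but the conversion
	// concentrates the length check in one place and lets the compiler
	// emit a fixed-size move instead of a generic copy loop.
	*(*[window]byte)(buf) = *(*[window]byte)(buf[offset:])
	fmt.Println(buf[0], buf[window-1]) // 64 127
}

The conversion panics if the slice is shorter than the array, so it only pays off where the length is already guaranteed, as it is in addBlock.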
-func hash4u(u uint32, h uint8) uint32 { - return (u * prime4bytes) >> ((32 - h) & reg8SizeMask32) -} - type tableEntryPrev struct { Cur tableEntry Prev tableEntry } -// hash4x64 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <32. -func hash4x64(u uint64, h uint8) uint32 { - return (uint32(u) * prime4bytes) >> ((32 - h) & reg8SizeMask32) -} - // hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits. // Preferably h should be a constant and should always be <64. func hash7(u uint64, h uint8) uint32 { return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & reg8SizeMask64)) } -// hash8 returns the hash of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <64. -func hash8(u uint64, h uint8) uint32 { - return uint32((u * prime8bytes) >> ((64 - h) & reg8SizeMask64)) -} - -// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <64. -func hash6(u uint64, h uint8) uint32 { - return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & reg8SizeMask64)) +// hashLen returns a hash of the lowest mls bytes of with length output bits. +// mls must be >=3 and <=8. Any other value will return hash for 4 bytes. +// length should always be < 32. +// Preferably length and mls should be a constant for inlining. +func hashLen(u uint64, length, mls uint8) uint32 { + switch mls { + case 3: + return (uint32(u<<8) * prime3bytes) >> (32 - length) + case 5: + return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length)) + case 6: + return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length)) + case 7: + return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length)) + case 8: + return uint32((u * prime8bytes) >> (64 - length)) + default: + return (uint32(u) * prime4bytes) >> (32 - length) + } } // matchlen will return the match length between offsets and t in src. @@ -179,7 +162,7 @@ func (e *fastGen) matchlen(s, t int32, src []byte) int32 { // matchlenLong will return the match length between offsets and t in src. // It is assumed that s > t, that t >=0 and s < len(src). func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 { - if debugDecode { + if debugDeflate { if t >= s { panic(fmt.Sprint("t >=s:", t, s)) } @@ -213,26 +196,15 @@ func (e *fastGen) Reset() { // matchLen returns the maximum length. // 'a' must be the shortest of the two. func matchLen(a, b []byte) int { - b = b[:len(a)] var checked int - if len(a) >= 4 { - // Try 4 bytes first - if diff := binary.LittleEndian.Uint32(a) ^ binary.LittleEndian.Uint32(b); diff != 0 { - return bits.TrailingZeros32(diff) >> 3 - } - // Switch to 8 byte matching. 
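The hashLen helper added above collapses the per-size hash functions into one switch. A self-contained demonstration that only the lowest mls bytes influence the result; the primeNbytes constants are taken from fast_encoder.go:

package main

import (
	"encoding/binary"
	"fmt"
)

const (
	prime3bytes = 506832829
	prime4bytes = 2654435761
	prime5bytes = 889523592379
	prime6bytes = 227718039650203
	prime7bytes = 58295818150454627
	prime8bytes = 0xcf1bbcdcb7a56463
)

// hashLen mirrors the patched helper: shift the unused high bytes away,
// then multiply-shift down to `length` output bits.
func hashLen(u uint64, length, mls uint8) uint32 {
	switch mls {
	case 3:
		return (uint32(u<<8) * prime3bytes) >> (32 - length)
	case 5:
		return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length))
	case 6:
		return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length))
	case 7:
		return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length))
	case 8:
		return uint32((u * prime8bytes) >> (64 - length))
	default:
		return (uint32(u) * prime4bytes) >> (32 - length)
	}
}

func main() {
	u := binary.LittleEndian.Uint64([]byte("abcdefgh"))
	// With mls=5 only the low 5 bytes matter: changing byte 5 has no effect.
	v := binary.LittleEndian.Uint64([]byte("abcdeXgh"))
	fmt.Println(hashLen(u, 15, 5) == hashLen(v, 15, 5)) // true
}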
- checked = 4 - a = a[4:] - b = b[4:] - for len(a) >= 8 { - b = b[:len(a)] - if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 { - return checked + (bits.TrailingZeros64(diff) >> 3) - } - checked += 8 - a = a[8:] - b = b[8:] + + for len(a) >= 8 { + if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 { + return checked + (bits.TrailingZeros64(diff) >> 3) } + checked += 8 + a = a[8:] + b = b[8:] } b = b[:len(a)] for i := range a { diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go index 3ad5e9807..89a5dd89f 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go @@ -8,6 +8,7 @@ import ( "encoding/binary" "fmt" "io" + "math" ) const ( @@ -24,6 +25,10 @@ const ( codegenCodeCount = 19 badCode = 255 + // maxPredefinedTokens is the maximum number of tokens + // where we check if fixed size is smaller. + maxPredefinedTokens = 250 + // bufferFlushSize indicates the buffer size // after which bytes are flushed to the writer. // Should preferably be a multiple of 6, since @@ -36,8 +41,11 @@ const ( bufferSize = bufferFlushSize + 8 ) +// Minimum length code that emits bits. +const lengthExtraBitsMinCode = 8 + // The number of extra bits needed by length code X - LENGTH_CODES_START. -var lengthExtraBits = [32]int8{ +var lengthExtraBits = [32]uint8{ /* 257 */ 0, 0, 0, /* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, /* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, @@ -51,19 +59,22 @@ var lengthBase = [32]uint8{ 64, 80, 96, 112, 128, 160, 192, 224, 255, } +// Minimum offset code that emits bits. +const offsetExtraBitsMinCode = 4 + // offset code word extra bits. -var offsetExtraBits = [64]int8{ +var offsetExtraBits = [32]int8{ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, /* extended window */ - 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, + 14, 14, } var offsetCombined = [32]uint32{} func init() { - var offsetBase = [64]uint32{ + var offsetBase = [32]uint32{ /* normal deflate */ 0x000000, 0x000001, 0x000002, 0x000003, 0x000004, 0x000006, 0x000008, 0x00000c, 0x000010, 0x000018, @@ -73,17 +84,15 @@ func init() { 0x001800, 0x002000, 0x003000, 0x004000, 0x006000, /* extended window */ - 0x008000, 0x00c000, 0x010000, 0x018000, 0x020000, - 0x030000, 0x040000, 0x060000, 0x080000, 0x0c0000, - 0x100000, 0x180000, 0x200000, 0x300000, + 0x008000, 0x00c000, } for i := range offsetCombined[:] { // Don't use extended window values... - if offsetBase[i] > 0x006000 { + if offsetExtraBits[i] == 0 || offsetBase[i] > 0x006000 { continue } - offsetCombined[i] = uint32(offsetExtraBits[i])<<16 | (offsetBase[i]) + offsetCombined[i] = uint32(offsetExtraBits[i]) | (offsetBase[i] << 8) } } @@ -99,7 +108,7 @@ type huffmanBitWriter struct { // Data waiting to be written is bytes[0:nbytes] // and then the low nbits of bits. 
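The init hunk above packs each offset code's extra-bit count and base offset into one offsetCombined word. A sketch with the first eight deflate offset codes (RFC 1951, section 3.2.5); unlike this sketch, the patch leaves zero-extra-bit codes empty because they are gated by offsetExtraBitsMinCode and never consulted.

package main

import "fmt"

var offsetExtraBits = [8]uint8{0, 0, 0, 0, 1, 1, 2, 2}
var offsetBase = [8]uint32{0, 1, 2, 3, 4, 6, 8, 12}
var offsetCombined [8]uint32

func init() {
	for i := range offsetCombined {
		// Extra-bit count in the low 8 bits, base offset above it,
		// so the hot loop fetches both with a single table load.
		offsetCombined[i] = uint32(offsetExtraBits[i]) | offsetBase[i]<<8
	}
}

func main() {
	const code = 5
	comb := offsetCombined[code]
	fmt.Println("extra bits:", uint8(comb), "base:", comb>>8) // extra bits: 1 base: 6
}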
bits uint64 - nbits uint16 + nbits uint8 nbytes uint8 lastHuffMan bool literalEncoding *huffmanEncoder @@ -155,37 +164,33 @@ func (w *huffmanBitWriter) reset(writer io.Writer) { w.lastHuffMan = false } -func (w *huffmanBitWriter) canReuse(t *tokens) (offsets, lits bool) { - offsets, lits = true, true +func (w *huffmanBitWriter) canReuse(t *tokens) (ok bool) { a := t.offHist[:offsetCodeCount] - b := w.offsetFreq[:len(a)] - for i := range a { - if b[i] == 0 && a[i] != 0 { - offsets = false - break + b := w.offsetEncoding.codes + b = b[:len(a)] + for i, v := range a { + if v != 0 && b[i].zero() { + return false } } a = t.extraHist[:literalCount-256] - b = w.literalFreq[256:literalCount] + b = w.literalEncoding.codes[256:literalCount] b = b[:len(a)] - for i := range a { - if b[i] == 0 && a[i] != 0 { - lits = false - break + for i, v := range a { + if v != 0 && b[i].zero() { + return false } } - if lits { - a = t.litHist[:] - b = w.literalFreq[:len(a)] - for i := range a { - if b[i] == 0 && a[i] != 0 { - lits = false - break - } + + a = t.litHist[:256] + b = w.literalEncoding.codes[:len(a)] + for i, v := range a { + if v != 0 && b[i].zero() { + return false } } - return + return true } func (w *huffmanBitWriter) flush() { @@ -221,8 +226,8 @@ func (w *huffmanBitWriter) write(b []byte) { _, w.err = w.writer.Write(b) } -func (w *huffmanBitWriter) writeBits(b int32, nb uint16) { - w.bits |= uint64(b) << w.nbits +func (w *huffmanBitWriter) writeBits(b int32, nb uint8) { + w.bits |= uint64(b) << (w.nbits & 63) w.nbits += nb if w.nbits >= 48 { w.writeOutBits() @@ -260,9 +265,9 @@ func (w *huffmanBitWriter) writeBytes(bytes []byte) { // Codes 0-15 are single byte codes. Codes 16-18 are followed by additional // information. Code badCode is an end marker // -// numLiterals The number of literals in literalEncoding -// numOffsets The number of offsets in offsetEncoding -// litenc, offenc The literal and offset encoder to use +// numLiterals The number of literals in literalEncoding +// numOffsets The number of offsets in offsetEncoding +// litenc, offenc The literal and offset encoder to use func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) { for i := range w.codegenFreq { w.codegenFreq[i] = 0 @@ -275,12 +280,12 @@ func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litE // Copy the concatenated code sizes to codegen. Put a marker at the end. cgnl := codegen[:numLiterals] for i := range cgnl { - cgnl[i] = uint8(litEnc.codes[i].len) + cgnl[i] = litEnc.codes[i].len() } cgnl = codegen[numLiterals : numLiterals+numOffsets] for i := range cgnl { - cgnl[i] = uint8(offEnc.codes[i].len) + cgnl[i] = offEnc.codes[i].len() } codegen[numLiterals+numOffsets] = badCode @@ -423,8 +428,8 @@ func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) { func (w *huffmanBitWriter) writeCode(c hcode) { // The function does not get inlined if we "& 63" the shift. - w.bits |= uint64(c.code) << w.nbits - w.nbits += c.len + w.bits |= c.code64() << (w.nbits & 63) + w.nbits += c.len() if w.nbits >= 48 { w.writeOutBits() } @@ -455,9 +460,9 @@ func (w *huffmanBitWriter) writeOutBits() { // Write the header of a dynamic Huffman block to the output stream. 
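A minimal model (names are mine, not the library's) of the bit-writer hot path above: accumulate codes into a 64-bit register, mask the shift amount with &63 so the compiler can drop its variable-shift range logic and inline the function, and spill whole bytes once 48 or more bits are pending.

package main

import (
	"encoding/binary"
	"fmt"
)

type bitWriter struct {
	bits  uint64
	nbits uint8
	out   []byte
}

func (w *bitWriter) writeBits(b uint64, nb uint8) {
	w.bits |= b << (w.nbits & 63)
	w.nbits += nb
	if w.nbits >= 48 {
		// Flush the full bytes; up to 7 bits stay in the register.
		var tmp [8]byte
		binary.LittleEndian.PutUint64(tmp[:], w.bits)
		n := w.nbits >> 3
		w.out = append(w.out, tmp[:n]...)
		w.bits >>= (n * 8) & 63
		w.nbits -= n * 8
	}
}

func main() {
	var w bitWriter
	for i := 0; i < 10; i++ {
		w.writeBits(0x155, 9) // ten 9-bit codes = 90 bits
	}
	fmt.Println(len(w.out)*8, "bits flushed,", w.nbits, "pending") // 48 flushed, 42 pending
}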
// -// numLiterals The number of literals specified in codegen -// numOffsets The number of offsets specified in codegen -// numCodegens The number of codegens used in codegen +// numLiterals The number of literals specified in codegen +// numOffsets The number of offsets specified in codegen +// numCodegens The number of codegens used in codegen func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) { if w.err != nil { return @@ -472,7 +477,7 @@ func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, n w.writeBits(int32(numCodegens-4), 4) for i := 0; i < numCodegens; i++ { - value := uint(w.codegenEncoding.codes[codegenOrder[i]].len) + value := uint(w.codegenEncoding.codes[codegenOrder[i]].len()) w.writeBits(int32(value), 3) } @@ -566,7 +571,7 @@ func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) { w.lastHeader = 0 } numLiterals, numOffsets := w.indexTokens(tokens, false) - w.generate(tokens) + w.generate() var extraBits int storedSize, storable := w.storedSize(input) if storable { @@ -577,7 +582,10 @@ func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) { // Fixed Huffman baseline. var literalEncoding = fixedLiteralEncoding var offsetEncoding = fixedOffsetEncoding - var size = w.fixedSize(extraBits) + var size = math.MaxInt32 + if tokens.n < maxPredefinedTokens { + size = w.fixedSize(extraBits) + } // Dynamic Huffman? var numCodegens int @@ -595,7 +603,7 @@ func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) { } // Stored bytes? - if storable && storedSize < size { + if storable && storedSize <= size { w.writeStoredHeader(len(input), eof) w.writeBytes(input) return @@ -634,22 +642,39 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b w.lastHeader = 0 w.lastHuffMan = false } - if !sync { - tokens.Fill() + + // fillReuse enables filling of empty values. + // This will make encodings always reusable without testing. + // However, this does not appear to benefit on most cases. + const fillReuse = false + + // Check if we can reuse... + if !fillReuse && w.lastHeader > 0 && !w.canReuse(tokens) { + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 } + numLiterals, numOffsets := w.indexTokens(tokens, !sync) + extraBits := 0 + ssize, storable := w.storedSize(input) + + const usePrefs = true + if storable || w.lastHeader > 0 { + extraBits = w.extraBitSize() + } var size int + // Check if we should reuse. if w.lastHeader > 0 { // Estimate size for using a new table. // Use the previous header size as the best estimate. newSize := w.lastHeader + tokens.EstimatedBits() - newSize += newSize >> w.logNewTablePenalty + newSize += int(w.literalEncoding.codes[endBlockMarker].len()) + newSize>>w.logNewTablePenalty // The estimated size is calculated as an optimal table. // We add a penalty to make it more realistic and re-use a bit more. - reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + w.extraBitSize() + reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + extraBits // Check if a new table is better. if newSize < reuseSize { @@ -660,35 +685,83 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b } else { size = reuseSize } + + if tokens.n < maxPredefinedTokens { + if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size { + // Check if we get a reasonable size decrease. 
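A toy version of the reuse decision in writeBlockDynamic above, with made-up bit counts: price the tokens with a fresh optimal table plus the previous header size as a header estimate, add a pessimism penalty of size>>logNewTablePenalty since the optimal-table estimate is optimistic, and compare against pricing the same tokens with the old table. It omits the end-of-block code the patch also adds to newSize. The 70-byte header guess matches the guessHeaderSizeBits constant used elsewhere in this file.

package main

import "fmt"

func main() {
	const logNewTablePenalty = 8
	lastHeaderBits := 70 * 8     // size of the previous dynamic header
	estimatedTokenBits := 12_000 // optimal-table estimate for the tokens
	reuseTokenBits := 12_600     // same tokens priced with the old table

	newSize := lastHeaderBits + estimatedTokenBits
	newSize += newSize >> logNewTablePenalty // ~0.4% pessimism
	reuseSize := reuseTokenBits              // no header needed when reusing

	if newSize < reuseSize {
		fmt.Println("emit new table:", newSize, "<", reuseSize)
	} else {
		fmt.Println("reuse old table:", reuseSize, "<=", newSize)
	}
}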
+ if storable && ssize <= size { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + w.writeFixedHeader(eof) + if !sync { + tokens.AddEOB() + } + w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) + return + } + } // Check if we get a reasonable size decrease. - if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) { + if storable && ssize <= size { w.writeStoredHeader(len(input), eof) w.writeBytes(input) - w.lastHeader = 0 return } } // We want a new block/table if w.lastHeader == 0 { - w.generate(tokens) + if fillReuse && !sync { + w.fillTokens() + numLiterals, numOffsets = maxNumLit, maxNumDist + } else { + w.literalFreq[endBlockMarker] = 1 + } + + w.generate() // Generate codegen and codegenFrequencies, which indicates how to encode // the literalEncoding and the offsetEncoding. w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) w.codegenEncoding.generate(w.codegenFreq[:], 7) + var numCodegens int - size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, w.extraBitSize()) - // Store bytes, if we don't get a reasonable improvement. - if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) { + if fillReuse && !sync { + // Reindex for accurate size... + w.indexTokens(tokens, true) + } + size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits) + + // Store predefined, if we don't get a reasonable improvement. + if tokens.n < maxPredefinedTokens { + if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size { + // Store bytes, if we don't get an improvement. + if storable && ssize <= preSize { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + w.writeFixedHeader(eof) + if !sync { + tokens.AddEOB() + } + w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) + return + } + } + + if storable && ssize <= size { + // Store bytes, if we don't get an improvement. w.writeStoredHeader(len(input), eof) w.writeBytes(input) - w.lastHeader = 0 return } // Write Huffman table. w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) - w.lastHeader, _ = w.headerSize() + if !sync { + w.lastHeader, _ = w.headerSize() + } w.lastHuffMan = false } @@ -699,14 +772,29 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b w.writeTokens(tokens.Slice(), w.literalEncoding.codes, w.offsetEncoding.codes) } +func (w *huffmanBitWriter) fillTokens() { + for i, v := range w.literalFreq[:literalCount] { + if v == 0 { + w.literalFreq[i] = 1 + } + } + for i, v := range w.offsetFreq[:offsetCodeCount] { + if v == 0 { + w.offsetFreq[i] = 1 + } + } +} + // indexTokens indexes a slice of tokens, and updates // literalFreq and offsetFreq, and generates literalEncoding // and offsetEncoding. // The number of literal and offset tokens is returned. 
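Why fillTokens and the canReuse check earlier in this file care about zero frequencies: a symbol that was absent when a table was built has no code in it, so a later block containing that symbol cannot be written with the old table. A tiny sketch with a four-symbol alphabet, where a length of 0 marks an unassigned code:

package main

import "fmt"

func canReuse(freq []uint16, codeLen []uint8) bool {
	for i, f := range freq {
		if f != 0 && codeLen[i] == 0 {
			return false // symbol has no code in the old table
		}
	}
	return true
}

func main() {
	oldLens := []uint8{2, 1, 0, 2}                       // symbol 2 never occurred before
	fmt.Println(canReuse([]uint16{5, 9, 0, 1}, oldLens)) // true
	fmt.Println(canReuse([]uint16{5, 9, 2, 1}, oldLens)) // false
}

Filling every frequency with at least 1 (fillTokens) guarantees every symbol gets a code, trading a slightly larger table for unconditional reusability; the patch keeps that path disabled behind fillReuse = false.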
func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) { - copy(w.literalFreq[:], t.litHist[:]) - copy(w.literalFreq[256:], t.extraHist[:]) - copy(w.offsetFreq[:], t.offHist[:offsetCodeCount]) + //copy(w.literalFreq[:], t.litHist[:]) + *(*[256]uint16)(w.literalFreq[:]) = t.litHist + //copy(w.literalFreq[256:], t.extraHist[:]) + *(*[32]uint16)(w.literalFreq[256:]) = t.extraHist + w.offsetFreq = t.offHist if t.n == 0 { return @@ -733,7 +821,7 @@ func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, num return } -func (w *huffmanBitWriter) generate(t *tokens) { +func (w *huffmanBitWriter) generate() { w.literalEncoding.generate(w.literalFreq[:literalCount], 15) w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15) } @@ -765,11 +853,11 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits, nbits, nbytes := w.bits, w.nbits, w.nbytes for _, t := range tokens { - if t < matchType { + if t < 256 { //w.writeCode(lits[t.literal()]) - c := lits[t.literal()] - bits |= uint64(c.code) << nbits - nbits += c.len + c := lits[t] + bits |= c.code64() << (nbits & 63) + nbits += c.len() if nbits >= 48 { binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits @@ -790,14 +878,14 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) // Write the length length := t.length() - lengthCode := lengthCode(length) + lengthCode := lengthCode(length) & 31 if false { - w.writeCode(lengths[lengthCode&31]) + w.writeCode(lengths[lengthCode]) } else { // inlined - c := lengths[lengthCode&31] - bits |= uint64(c.code) << nbits - nbits += c.len + c := lengths[lengthCode] + bits |= c.code64() << (nbits & 63) + nbits += c.len() if nbits >= 48 { binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits @@ -815,11 +903,11 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) } } - extraLengthBits := uint16(lengthExtraBits[lengthCode&31]) - if extraLengthBits > 0 { + if lengthCode >= lengthExtraBitsMinCode { + extraLengthBits := lengthExtraBits[lengthCode] //w.writeBits(extraLength, extraLengthBits) - extraLength := int32(length - lengthBase[lengthCode&31]) - bits |= uint64(extraLength) << nbits + extraLength := int32(length - lengthBase[lengthCode]) + bits |= uint64(extraLength) << (nbits & 63) nbits += extraLengthBits if nbits >= 48 { binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) @@ -839,15 +927,14 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) } // Write the offset offset := t.offset() - offsetCode := offset >> 16 - offset &= matchOffsetOnlyMask + offsetCode := (offset >> 16) & 31 if false { - w.writeCode(offs[offsetCode&31]) + w.writeCode(offs[offsetCode]) } else { // inlined c := offs[offsetCode] - bits |= uint64(c.code) << nbits - nbits += c.len + bits |= c.code64() << (nbits & 63) + nbits += c.len() if nbits >= 48 { binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits @@ -864,11 +951,12 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) } } } - offsetComb := offsetCombined[offsetCode] - if offsetComb > 1<<16 { + + if offsetCode >= offsetExtraBitsMinCode { + offsetComb := offsetCombined[offsetCode] //w.writeBits(extraOffset, extraOffsetBits) - bits |= uint64(offset&matchOffsetOnlyMask-(offsetComb&0xffff)) << nbits - nbits += 
uint16(offsetComb >> 16) + bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63) + nbits += uint8(offsetComb) if nbits >= 48 { binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits @@ -923,8 +1011,6 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { } } - // Fill is rarely better... - const fill = false const numLiterals = endBlockMarker + 1 const numOffsets = 1 @@ -933,26 +1019,46 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { // Assume header is around 70 bytes: // https://stackoverflow.com/a/25454430 const guessHeaderSizeBits = 70 * 8 - histogram(input, w.literalFreq[:numLiterals], fill) - w.literalFreq[endBlockMarker] = 1 - w.tmpLitEncoding.generate(w.literalFreq[:numLiterals], 15) - if fill { - // Clear fill... - for i := range w.literalFreq[:numLiterals] { - w.literalFreq[i] = 0 + histogram(input, w.literalFreq[:numLiterals]) + ssize, storable := w.storedSize(input) + if storable && len(input) > 1024 { + // Quick check for incompressible content. + abs := float64(0) + avg := float64(len(input)) / 256 + max := float64(len(input) * 2) + for _, v := range w.literalFreq[:256] { + diff := float64(v) - avg + abs += diff * diff + if abs > max { + break + } + } + if abs < max { + if debugDeflate { + fmt.Println("stored", abs, "<", max) + } + // No chance we can compress this... + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return } - histogram(input, w.literalFreq[:numLiterals], false) } + w.literalFreq[endBlockMarker] = 1 + w.tmpLitEncoding.generate(w.literalFreq[:numLiterals], 15) estBits := w.tmpLitEncoding.canReuseBits(w.literalFreq[:numLiterals]) - estBits += w.lastHeader - if w.lastHeader == 0 { - estBits += guessHeaderSizeBits + if estBits < math.MaxInt32 { + estBits += w.lastHeader + if w.lastHeader == 0 { + estBits += guessHeaderSizeBits + } + estBits += estBits >> w.logNewTablePenalty } - estBits += estBits >> w.logNewTablePenalty // Store bytes, if we don't get a reasonable improvement. - ssize, storable := w.storedSize(input) if storable && ssize <= estBits { + if debugDeflate { + fmt.Println("stored,", ssize, "<=", estBits) + } w.writeStoredHeader(len(input), eof) w.writeBytes(input) return @@ -963,7 +1069,7 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { if estBits < reuseSize { if debugDeflate { - //fmt.Println("not reusing, reuse:", reuseSize/8, "> new:", estBits/8, "- header est:", w.lastHeader/8) + fmt.Println("NOT reusing, reuse:", reuseSize/8, "> new:", estBits/8, "header est:", w.lastHeader/8, "bytes") } // We owe an EOB w.writeCode(w.literalEncoding.codes[endBlockMarker]) @@ -996,14 +1102,44 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { encoding := w.literalEncoding.codes[:256] // Go 1.16 LOVES having these on stack. At least 1.5x the speed. bits, nbits, nbytes := w.bits, w.nbits, w.nbytes - for _, t := range input { - // Bitwriting inlined, ~30% speedup - c := encoding[t] - bits |= uint64(c.code) << nbits - nbits += c.len - if debugDeflate { - count += int(c.len) + + if debugDeflate { + count -= int(nbytes)*8 + int(nbits) + } + // Unroll, write 3 codes/loop. + // Fastest number of unrolls. + for len(input) > 3 { + // We must have at least 48 bits free. 
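A standalone version of the quick entropy screen added to writeBlockHuff above: if the byte histogram's squared deviation from uniform stays below roughly 2*len(input), the block is close to random and gets stored instead of Huffman coded. The thresholds are copied from the patch; the function name and inputs are mine, and the patch additionally gates this on storability and blocks larger than 1KiB.

package main

import (
	"bytes"
	"fmt"
	"math/rand"
)

func looksIncompressible(input []byte) bool {
	var freq [256]int
	for _, b := range input {
		freq[b]++
	}
	avg := float64(len(input)) / 256
	max := float64(len(input) * 2)
	abs := float64(0)
	for _, v := range freq {
		diff := float64(v) - avg
		abs += diff * diff
		if abs > max {
			return false // skewed histogram: worth compressing
		}
	}
	return abs < max
}

func main() {
	random := make([]byte, 4096)
	rand.Read(random)
	text := bytes.Repeat([]byte("the quick brown fox "), 205)[:4096]
	fmt.Println(looksIncompressible(random), looksIncompressible(text)) // true false
}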
+ if nbits >= 8 { + n := nbits >> 3 + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + bits >>= (n * 8) & 63 + nbits -= n * 8 + nbytes += n + } + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + if debugDeflate { + count += int(nbytes) * 8 + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 } + a, b := encoding[input[0]], encoding[input[1]] + bits |= a.code64() << (nbits & 63) + bits |= b.code64() << ((nbits + a.len()) & 63) + c := encoding[input[2]] + nbits += b.len() + a.len() + bits |= c.code64() << (nbits & 63) + nbits += c.len() + input = input[3:] + } + + // Remaining... + for _, t := range input { if nbits >= 48 { binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits @@ -1015,17 +1151,34 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { nbytes = 0 return } + if debugDeflate { + count += int(nbytes) * 8 + } _, w.err = w.writer.Write(w.bytes[:nbytes]) nbytes = 0 } } + // Bitwriting inlined, ~30% speedup + c := encoding[t] + bits |= c.code64() << (nbits & 63) + + nbits += c.len() + if debugDeflate { + count += int(c.len()) + } } // Restore... w.bits, w.nbits, w.nbytes = bits, nbits, nbytes if debugDeflate { - fmt.Println("wrote", count/8, "bytes") + nb := count + int(nbytes)*8 + int(nbits) + fmt.Println("wrote", nb, "bits,", nb/8, "bytes.") } + // Flush if needed to have space. + if w.nbits >= 48 { + w.writeOutBits() + } + if eof || sync { w.writeCode(w.literalEncoding.codes[endBlockMarker]) w.lastHeader = 0 diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go index 67b2b3872..be7b58b47 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_code.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go @@ -16,8 +16,18 @@ const ( ) // hcode is a huffman code with a bit code and bit length. -type hcode struct { - code, len uint16 +type hcode uint32 + +func (h hcode) len() uint8 { + return uint8(h) +} + +func (h hcode) code64() uint64 { + return uint64(h >> 8) +} + +func (h hcode) zero() bool { + return h == 0 } type huffmanEncoder struct { @@ -56,9 +66,12 @@ type levelInfo struct { } // set sets the code and length of an hcode. -func (h *hcode) set(code uint16, length uint16) { - h.len = length - h.code = code +func (h *hcode) set(code uint16, length uint8) { + *h = hcode(length) | (hcode(code) << 8) +} + +func newhcode(code uint16, length uint8) hcode { + return hcode(length) | (hcode(code) << 8) } func reverseBits(number uint16, bitLength byte) uint16 { @@ -80,7 +93,7 @@ func generateFixedLiteralEncoding() *huffmanEncoder { var ch uint16 for ch = 0; ch < literalCount; ch++ { var bits uint16 - var size uint16 + var size uint8 switch { case ch < 144: // size 8, 000110000 .. 
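The huffman_code.go hunk below repacks hcode from struct{code, len uint16} into a single uint32: code length in the low 8 bits, canonical code above it. A sketch showing the accessors reduce to a truncation and a shift:

package main

import "fmt"

type hcode uint32

func newhcode(code uint16, length uint8) hcode { return hcode(length) | hcode(code)<<8 }

func (h hcode) len() uint8     { return uint8(h) }
func (h hcode) code64() uint64 { return uint64(h >> 8) }
func (h hcode) zero() bool     { return h == 0 }

func main() {
	c := newhcode(0x155, 9)
	fmt.Println(c.code64(), c.len(), c.zero()) // 341 9 false
}

A zero value now doubles as "no code assigned", which is what the canReuse and canReuseBits checks test with zero().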
10111111 @@ -99,7 +112,7 @@ func generateFixedLiteralEncoding() *huffmanEncoder { bits = ch + 192 - 280 size = 8 } - codes[ch] = hcode{code: reverseBits(bits, byte(size)), len: size} + codes[ch] = newhcode(reverseBits(bits, size), size) } return h } @@ -108,7 +121,7 @@ func generateFixedOffsetEncoding() *huffmanEncoder { h := newHuffmanEncoder(30) codes := h.codes for ch := range codes { - codes[ch] = hcode{code: reverseBits(uint16(ch), 5), len: 5} + codes[ch] = newhcode(reverseBits(uint16(ch), 5), 5) } return h } @@ -120,7 +133,7 @@ func (h *huffmanEncoder) bitLength(freq []uint16) int { var total int for i, f := range freq { if f != 0 { - total += int(f) * int(h.codes[i].len) + total += int(f) * int(h.codes[i].len()) } } return total @@ -129,9 +142,7 @@ func (h *huffmanEncoder) bitLength(freq []uint16) int { func (h *huffmanEncoder) bitLengthRaw(b []byte) int { var total int for _, f := range b { - if f != 0 { - total += int(h.codes[f].len) - } + total += int(h.codes[f].len()) } return total } @@ -142,10 +153,10 @@ func (h *huffmanEncoder) canReuseBits(freq []uint16) int { for i, f := range freq { if f != 0 { code := h.codes[i] - if code.len == 0 { + if code.zero() { return math.MaxInt32 } - total += int(f) * int(code.len) + total += int(f) * int(code.len()) } } return total @@ -157,13 +168,18 @@ func (h *huffmanEncoder) canReuseBits(freq []uint16) int { // The cases of 0, 1, and 2 literals are handled by special case code. // // list An array of the literals with non-zero frequencies -// and their associated frequencies. The array is in order of increasing -// frequency, and has as its last element a special element with frequency -// MaxInt32 +// +// and their associated frequencies. The array is in order of increasing +// frequency, and has as its last element a special element with frequency +// MaxInt32 +// // maxBits The maximum number of bits that should be used to encode any literal. -// Must be less than 16. +// +// Must be less than 16. +// // return An integer array in which array[i] indicates the number of literals -// that should be encoded in i bits. +// +// that should be encoded in i bits. func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 { if maxBits >= maxBitsLimit { panic("flate: maxBits too large") @@ -189,14 +205,19 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 { // of the level j ancestor. var leafCounts [maxBitsLimit][maxBitsLimit]int32 + // Descending to only have 1 bounds check. + l2f := int32(list[2].freq) + l1f := int32(list[1].freq) + l0f := int32(list[0].freq) + int32(list[1].freq) + for level := int32(1); level <= maxBits; level++ { // For every level, the first two items are the first two characters. // We initialize the levels as if we had already figured this out. levels[level] = levelInfo{ level: level, - lastFreq: int32(list[1].freq), - nextCharFreq: int32(list[2].freq), - nextPairFreq: int32(list[0].freq) + int32(list[1].freq), + lastFreq: l1f, + nextCharFreq: l2f, + nextPairFreq: l0f, } leafCounts[level][level] = 2 if level == 1 { @@ -207,8 +228,8 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 { // We need a total of 2*n - 2 items at top level and have already generated 2. levels[maxBits].needed = 2*n - 4 - level := maxBits - for { + level := uint32(maxBits) + for level < 16 { l := &levels[level] if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 { // We've run out of both leafs and pairs. 
@@ -240,7 +261,13 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 { // more values in the level below l.lastFreq = l.nextPairFreq // Take leaf counts from the lower level, except counts[level] remains the same. - copy(leafCounts[level][:level], leafCounts[level-1][:level]) + if true { + save := leafCounts[level][level] + leafCounts[level] = leafCounts[level-1] + leafCounts[level][level] = save + } else { + copy(leafCounts[level][:level], leafCounts[level-1][:level]) + } levels[l.level-1].needed = 2 } @@ -298,7 +325,7 @@ func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalN sortByLiteral(chunk) for _, node := range chunk { - h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint16(n)} + h.codes[node.literal] = newhcode(reverseBits(code, uint8(n)), uint8(n)) code++ } list = list[0 : len(list)-int(bits)] @@ -311,6 +338,7 @@ func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalN // maxBits The maximum number of bits to use for any literal. func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) { list := h.freqcache[:len(freq)+1] + codes := h.codes[:len(freq)] // Number of non-zero literals count := 0 // Set list to be the set of all non-zero literals and their frequencies @@ -319,11 +347,10 @@ func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) { list[count] = literalNode{uint16(i), f} count++ } else { - list[count] = literalNode{} - h.codes[i].len = 0 + codes[i] = 0 } } - list[len(freq)] = literalNode{} + list[count] = literalNode{} list = list[:count] if count <= 2 { @@ -354,21 +381,37 @@ func atLeastOne(v float32) float32 { return v } -// Unassigned values are assigned '1' in the histogram. -func fillHist(b []uint16) { - for i, v := range b { - if v == 0 { - b[i] = 1 +func histogram(b []byte, h []uint16) { + if true && len(b) >= 8<<10 { + // Split for bigger inputs + histogramSplit(b, h) + } else { + h = h[:256] + for _, t := range b { + h[t]++ } } } -func histogram(b []byte, h []uint16, fill bool) { +func histogramSplit(b []byte, h []uint16) { + // Tested, and slightly faster than 2-way. + // Writing to separate arrays and combining is also slightly slower. 
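A self-contained version of the histogramSplit function whose body continues below: process four interleaved quarters per iteration so the counter increments go through four independent pointers instead of serializing on one hot counter.

package main

import "fmt"

func histogram(b []byte, h *[256]uint16) {
	// Peel bytes until the remainder is a multiple of 4.
	for len(b)&3 != 0 {
		h[b[0]]++
		b = b[1:]
	}
	n := len(b) / 4
	x, y, z, w := b[:n], b[n:], b[n+n:], b[n+n+n:]
	y, z, w = y[:len(x)], z[:len(x)], w[:len(x)]
	for i, t := range x {
		v0, v1, v2, v3 := &h[t], &h[y[i]], &h[z[i]], &h[w[i]]
		*v0++
		*v1++
		*v2++
		*v3++
	}
}

func main() {
	var h [256]uint16
	histogram([]byte("abracadabra"), &h)
	fmt.Println(h['a'], h['b'], h['r']) // 5 2 2
}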
h = h[:256] - for _, t := range b { - h[t]++ + for len(b)&3 != 0 { + h[b[0]]++ + b = b[1:] } - if fill { - fillHist(h) + n := len(b) / 4 + x, y, z, w := b[:n], b[n:], b[n+n:], b[n+n+n:] + y, z, w = y[:len(x)], z[:len(x)], w[:len(x)] + for i, t := range x { + v0 := &h[t] + v1 := &h[y[i]] + v3 := &h[w[i]] + v2 := &h[z[i]] + *v0++ + *v1++ + *v2++ + *v3++ } } diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go index 16bc51408..414c0bea9 100644 --- a/vendor/github.com/klauspost/compress/flate/inflate.go +++ b/vendor/github.com/klauspost/compress/flate/inflate.go @@ -9,10 +9,10 @@ package flate import ( "bufio" + "compress/flate" "fmt" "io" "math/bits" - "strconv" "sync" ) @@ -36,16 +36,19 @@ type lengthExtra struct { var decCodeToLen = [32]lengthExtra{{length: 0x0, extra: 0x0}, {length: 0x1, extra: 0x0}, {length: 0x2, extra: 0x0}, {length: 0x3, extra: 0x0}, {length: 0x4, extra: 0x0}, {length: 0x5, extra: 0x0}, {length: 0x6, extra: 0x0}, {length: 0x7, extra: 0x0}, {length: 0x8, extra: 0x1}, {length: 0xa, extra: 0x1}, {length: 0xc, extra: 0x1}, {length: 0xe, extra: 0x1}, {length: 0x10, extra: 0x2}, {length: 0x14, extra: 0x2}, {length: 0x18, extra: 0x2}, {length: 0x1c, extra: 0x2}, {length: 0x20, extra: 0x3}, {length: 0x28, extra: 0x3}, {length: 0x30, extra: 0x3}, {length: 0x38, extra: 0x3}, {length: 0x40, extra: 0x4}, {length: 0x50, extra: 0x4}, {length: 0x60, extra: 0x4}, {length: 0x70, extra: 0x4}, {length: 0x80, extra: 0x5}, {length: 0xa0, extra: 0x5}, {length: 0xc0, extra: 0x5}, {length: 0xe0, extra: 0x5}, {length: 0xff, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}} +var bitMask32 = [32]uint32{ + 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, + 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, + 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, + 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, +} // up to 32 bits + // Initialize the fixedHuffmanDecoder only once upon first use. var fixedOnce sync.Once var fixedHuffmanDecoder huffmanDecoder // A CorruptInputError reports the presence of corrupt input at a given offset. -type CorruptInputError int64 - -func (e CorruptInputError) Error() string { - return "flate: corrupt input before offset " + strconv.FormatInt(int64(e), 10) -} +type CorruptInputError = flate.CorruptInputError // An InternalError reports an error in the flate code itself. type InternalError string @@ -55,26 +58,12 @@ func (e InternalError) Error() string { return "flate: internal error: " + strin // A ReadError reports an error encountered while reading input. // // Deprecated: No longer returned. -type ReadError struct { - Offset int64 // byte offset where error occurred - Err error // error returned by underlying Read -} - -func (e *ReadError) Error() string { - return "flate: read error at offset " + strconv.FormatInt(e.Offset, 10) + ": " + e.Err.Error() -} +type ReadError = flate.ReadError // A WriteError reports an error encountered while writing output. // // Deprecated: No longer returned. 
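The bitMask32 table added to inflate.go below replaces computing 1<<n - 1 in the hot decode loop with a single table load. A sketch confirming both forms agree for n in [0,31]:

package main

import "fmt"

var bitMask32 = func() (m [32]uint32) {
	for i := range m {
		m[i] = 1<<uint(i) - 1
	}
	return
}()

func main() {
	b := uint32(0b1011_0110)
	n := uint(5)
	fmt.Println(b&bitMask32[n] == b&(1<<n-1)) // true
	fmt.Println(b & bitMask32[n])             // 22 (0b10110)
}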
-type WriteError struct { - Offset int64 // byte offset where error occurred - Err error // error returned by underlying Write -} - -func (e *WriteError) Error() string { - return "flate: write error at offset " + strconv.FormatInt(e.Offset, 10) + ": " + e.Err.Error() -} +type WriteError = flate.WriteError // Resetter resets a ReadCloser returned by NewReader or NewReaderDict to // to switch to a new underlying Reader. This permits reusing a ReadCloser @@ -346,11 +335,17 @@ func (f *decompressor) nextBlock() { switch typ { case 0: f.dataBlock() + if debugDecode { + fmt.Println("stored block") + } case 1: // compressed, fixed Huffman tables f.hl = &fixedHuffmanDecoder f.hd = nil f.huffmanBlockDecoder()() + if debugDecode { + fmt.Println("predefinied huffman block") + } case 2: // compressed, dynamic Huffman tables if f.err = f.readHuffman(); f.err != nil { @@ -359,6 +354,9 @@ func (f *decompressor) nextBlock() { f.hl = &f.h1 f.hd = &f.h2 f.huffmanBlockDecoder()() + if debugDecode { + fmt.Println("dynamic huffman block") + } default: // 3 is reserved. if debugDecode { @@ -568,221 +566,6 @@ func (f *decompressor) readHuffman() error { return nil } -// Decode a single Huffman block from f. -// hl and hd are the Huffman states for the lit/length values -// and the distance values, respectively. If hd == nil, using the -// fixed distance encoding associated with fixed Huffman blocks. -func (f *decompressor) huffmanBlockGeneric() { - const ( - stateInit = iota // Zero value must be stateInit - stateDict - ) - - switch f.stepState { - case stateInit: - goto readLiteral - case stateDict: - goto copyHistory - } - -readLiteral: - // Read literal and/or (length, distance) according to RFC section 3.2.3. - { - var v int - { - // Inlined v, err := f.huffSym(f.hl) - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hl.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. 
- nb, b := f.nb, f.b - for { - for nb < n { - c, err := f.r.ReadByte() - if err != nil { - f.b = b - f.nb = nb - f.err = noEOF(err) - return - } - f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 - } - chunk := f.hl.chunks[b&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= nb { - if n == 0 { - f.b = b - f.nb = nb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n - v = int(chunk >> huffmanValueShift) - break - } - } - } - - var n uint // number of bits extra - var length int - var err error - switch { - case v < 256: - f.dict.writeByte(byte(v)) - if f.dict.availWrite() == 0 { - f.toRead = f.dict.readFlush() - f.step = (*decompressor).huffmanBlockGeneric - f.stepState = stateInit - return - } - goto readLiteral - case v == 256: - f.finishBlock() - return - // otherwise, reference to older data - case v < 265: - length = v - (257 - 3) - n = 0 - case v < 269: - length = v*2 - (265*2 - 11) - n = 1 - case v < 273: - length = v*4 - (269*4 - 19) - n = 2 - case v < 277: - length = v*8 - (273*8 - 35) - n = 3 - case v < 281: - length = v*16 - (277*16 - 67) - n = 4 - case v < 285: - length = v*32 - (281*32 - 131) - n = 5 - case v < maxNumLit: - length = 258 - n = 0 - default: - if debugDecode { - fmt.Println(v, ">= maxNumLit") - } - f.err = CorruptInputError(f.roffset) - return - } - if n > 0 { - for f.nb < n { - if err = f.moreBits(); err != nil { - if debugDecode { - fmt.Println("morebits n>0:", err) - } - f.err = err - return - } - } - length += int(f.b & uint32(1<<(n®SizeMaskUint32)-1)) - f.b >>= n & regSizeMaskUint32 - f.nb -= n - } - - var dist uint32 - if f.hd == nil { - for f.nb < 5 { - if err = f.moreBits(); err != nil { - if debugDecode { - fmt.Println("morebits f.nb<5:", err) - } - f.err = err - return - } - } - dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) - f.b >>= 5 - f.nb -= 5 - } else { - sym, err := f.huffSym(f.hd) - if err != nil { - if debugDecode { - fmt.Println("huffsym:", err) - } - f.err = err - return - } - dist = uint32(sym) - } - - switch { - case dist < 4: - dist++ - case dist < maxNumDist: - nb := uint(dist-2) >> 1 - // have 1 bit in bottom of dist, need nb more. - extra := (dist & 1) << (nb & regSizeMaskUint32) - for f.nb < nb { - if err = f.moreBits(); err != nil { - if debugDecode { - fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 - f.nb -= nb - dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra - default: - if debugDecode { - fmt.Println("dist too big:", dist, maxNumDist) - } - f.err = CorruptInputError(f.roffset) - return - } - - // No check on length; encoding can be prescient. - if dist > uint32(f.dict.histSize()) { - if debugDecode { - fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) - } - f.err = CorruptInputError(f.roffset) - return - } - - f.copyLen, f.copyDist = length, int(dist) - goto copyHistory - } - -copyHistory: - // Perform a backwards copy according to RFC section 3.2.3. 
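The "backwards copy according to RFC section 3.2.3" performed just below allows length to exceed distance: the copy then replays bytes it has just written, which is how LZ77 run-length-encodes repeats. A minimal sketch of the writeCopy semantics (append-based; the real dictDecoder works in a ring buffer):

package main

import "fmt"

func writeCopy(hist []byte, dist, length int) []byte {
	start := len(hist) - dist
	for i := 0; i < length; i++ {
		hist = append(hist, hist[start+i]) // may read bytes appended in this loop
	}
	return hist
}

func main() {
	out := []byte("ab")
	out = writeCopy(out, 2, 6) // dist 2, length 6: run-length expansion
	fmt.Printf("%s\n", out)    // abababab
}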
- { - cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) - if cnt == 0 { - cnt = f.dict.writeCopy(f.copyDist, f.copyLen) - } - f.copyLen -= cnt - - if f.dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = f.dict.readFlush() - f.step = (*decompressor).huffmanBlockGeneric // We need to continue this work - f.stepState = stateDict - return - } - goto readLiteral - } -} - // Copy a single uncompressed data block from input to output. func (f *decompressor) dataBlock() { // Uncompressed. diff --git a/vendor/github.com/klauspost/compress/flate/inflate_gen.go b/vendor/github.com/klauspost/compress/flate/inflate_gen.go index cc6db2792..61342b6b8 100644 --- a/vendor/github.com/klauspost/compress/flate/inflate_gen.go +++ b/vendor/github.com/klauspost/compress/flate/inflate_gen.go @@ -21,6 +21,11 @@ func (f *decompressor) huffmanBytesBuffer() { ) fr := f.r.(*bytes.Buffer) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + fnb, fb, dict := f.nb, f.b, &f.dict + switch f.stepState { case stateInit: goto readLiteral @@ -39,41 +44,35 @@ readLiteral: // cases, the chunks slice will be 0 for the invalid sequence, leading it // satisfy the n == 0 check below. n := uint(f.hl.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hl.chunks[b&(huffmanNumChunks-1)] + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n v = int(chunk >> huffmanValueShift) break } @@ -83,15 +82,17 @@ readLiteral: var length int switch { case v < 256: - f.dict.writeByte(byte(v)) - if f.dict.availWrite() == 0 { - f.toRead = f.dict.readFlush() + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() f.step = (*decompressor).huffmanBytesBuffer f.stepState = stateInit + f.b, f.nb = fb, fnb return } goto readLiteral case v == 256: + f.b, f.nb = fb, fnb f.finishBlock() return // otherwise, reference to older data @@ -101,9 +102,10 @@ readLiteral: val := decCodeToLen[(v - 257)] length = int(val.length) + 3 n := uint(val.extra) - for f.nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits n>0:", err) } @@ -111,25 +113,27 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - length += int(f.b & uint32(1<<(n®SizeMaskUint32)-1)) - f.b >>= n & regSizeMaskUint32 - f.nb 
-= n + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n default: if debugDecode { fmt.Println(v, ">= maxNumLit") } f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb return } var dist uint32 if f.hd == nil { - for f.nb < 5 { + for fnb < 5 { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb<5:", err) } @@ -137,12 +141,12 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) - f.b >>= 5 - f.nb -= 5 + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 } else { // Since a huffmanDecoder can be empty or be composed of a degenerate tree // with single element, huffSym must error on these two edge cases. In both @@ -152,38 +156,35 @@ readLiteral: // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, // but is smart enough to keep local variables in registers, so use nb and b, // inline call to moreBits and reassign b,nb back to f on return. - nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hd.chunks[b&(huffmanNumChunks-1)] + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask] + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n dist = uint32(chunk >> huffmanValueShift) break } @@ -197,9 +198,10 @@ readLiteral: nb := uint(dist-2) >> 1 // have 1 bit in bottom of dist, need nb more. extra := (dist & 1) << (nb & regSizeMaskUint32) - for f.nb < nb { + for fnb < nb { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 - f.nb -= nb + extra |= fb & bitMask32[nb] + fb >>= nb & regSizeMaskUint32 + fnb -= nb dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra + // slower: dist = bitMask32[nb+1] + 2 + extra default: + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("dist too big:", dist, maxNumDist) } @@ -223,9 +227,10 @@ readLiteral: } // No check on length; encoding can be prescient. - if dist > uint32(f.dict.histSize()) { + if dist > uint32(dict.histSize()) { + f.b, f.nb = fb, fnb if debugDecode { - fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) + fmt.Println("dist > dict.histSize():", dist, dict.histSize()) } f.err = CorruptInputError(f.roffset) return @@ -238,20 +243,22 @@ readLiteral: copyHistory: // Perform a backwards copy according to RFC section 3.2.3. 
{ - cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) + cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) if cnt == 0 { - cnt = f.dict.writeCopy(f.copyDist, f.copyLen) + cnt = dict.writeCopy(f.copyDist, f.copyLen) } f.copyLen -= cnt - if f.dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = f.dict.readFlush() + if dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = dict.readFlush() f.step = (*decompressor).huffmanBytesBuffer // We need to continue this work f.stepState = stateDict + f.b, f.nb = fb, fnb return } goto readLiteral } + // Not reached } // Decode a single Huffman block from f. @@ -265,6 +272,11 @@ func (f *decompressor) huffmanBytesReader() { ) fr := f.r.(*bytes.Reader) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + fnb, fb, dict := f.nb, f.b, &f.dict + switch f.stepState { case stateInit: goto readLiteral @@ -283,41 +295,35 @@ readLiteral: // cases, the chunks slice will be 0 for the invalid sequence, leading it // satisfy the n == 0 check below. n := uint(f.hl.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hl.chunks[b&(huffmanNumChunks-1)] + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n v = int(chunk >> huffmanValueShift) break } @@ -327,15 +333,17 @@ readLiteral: var length int switch { case v < 256: - f.dict.writeByte(byte(v)) - if f.dict.availWrite() == 0 { - f.toRead = f.dict.readFlush() + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() f.step = (*decompressor).huffmanBytesReader f.stepState = stateInit + f.b, f.nb = fb, fnb return } goto readLiteral case v == 256: + f.b, f.nb = fb, fnb f.finishBlock() return // otherwise, reference to older data @@ -345,9 +353,10 @@ readLiteral: val := decCodeToLen[(v - 257)] length = int(val.length) + 3 n := uint(val.extra) - for f.nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits n>0:", err) } @@ -355,25 +364,27 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - length += int(f.b & uint32(1<<(n®SizeMaskUint32)-1)) - f.b >>= n & regSizeMaskUint32 - f.nb -= n + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n default: if debugDecode { fmt.Println(v, ">= maxNumLit") } f.err = CorruptInputError(f.roffset) 
+ f.b, f.nb = fb, fnb return } var dist uint32 if f.hd == nil { - for f.nb < 5 { + for fnb < 5 { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb<5:", err) } @@ -381,12 +392,12 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) - f.b >>= 5 - f.nb -= 5 + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 } else { // Since a huffmanDecoder can be empty or be composed of a degenerate tree // with single element, huffSym must error on these two edge cases. In both @@ -396,38 +407,35 @@ readLiteral: // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, // but is smart enough to keep local variables in registers, so use nb and b, // inline call to moreBits and reassign b,nb back to f on return. - nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hd.chunks[b&(huffmanNumChunks-1)] + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask] + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n dist = uint32(chunk >> huffmanValueShift) break } @@ -441,9 +449,10 @@ readLiteral: nb := uint(dist-2) >> 1 // have 1 bit in bottom of dist, need nb more. extra := (dist & 1) << (nb & regSizeMaskUint32) - for f.nb < nb { + for fnb < nb { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 - f.nb -= nb + extra |= fb & bitMask32[nb] + fb >>= nb & regSizeMaskUint32 + fnb -= nb dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra + // slower: dist = bitMask32[nb+1] + 2 + extra default: + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("dist too big:", dist, maxNumDist) } @@ -467,9 +478,10 @@ readLiteral: } // No check on length; encoding can be prescient. - if dist > uint32(f.dict.histSize()) { + if dist > uint32(dict.histSize()) { + f.b, f.nb = fb, fnb if debugDecode { - fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) + fmt.Println("dist > dict.histSize():", dist, dict.histSize()) } f.err = CorruptInputError(f.roffset) return @@ -482,20 +494,22 @@ readLiteral: copyHistory: // Perform a backwards copy according to RFC section 3.2.3. 
{ - cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) + cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) if cnt == 0 { - cnt = f.dict.writeCopy(f.copyDist, f.copyLen) + cnt = dict.writeCopy(f.copyDist, f.copyLen) } f.copyLen -= cnt - if f.dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = f.dict.readFlush() + if dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = dict.readFlush() f.step = (*decompressor).huffmanBytesReader // We need to continue this work f.stepState = stateDict + f.b, f.nb = fb, fnb return } goto readLiteral } + // Not reached } // Decode a single Huffman block from f. @@ -509,6 +523,11 @@ func (f *decompressor) huffmanBufioReader() { ) fr := f.r.(*bufio.Reader) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + fnb, fb, dict := f.nb, f.b, &f.dict + switch f.stepState { case stateInit: goto readLiteral @@ -527,41 +546,35 @@ readLiteral: // cases, the chunks slice will be 0 for the invalid sequence, leading it // satisfy the n == 0 check below. n := uint(f.hl.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hl.chunks[b&(huffmanNumChunks-1)] + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n v = int(chunk >> huffmanValueShift) break } @@ -571,15 +584,17 @@ readLiteral: var length int switch { case v < 256: - f.dict.writeByte(byte(v)) - if f.dict.availWrite() == 0 { - f.toRead = f.dict.readFlush() + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() f.step = (*decompressor).huffmanBufioReader f.stepState = stateInit + f.b, f.nb = fb, fnb return } goto readLiteral case v == 256: + f.b, f.nb = fb, fnb f.finishBlock() return // otherwise, reference to older data @@ -589,9 +604,10 @@ readLiteral: val := decCodeToLen[(v - 257)] length = int(val.length) + 3 n := uint(val.extra) - for f.nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits n>0:", err) } @@ -599,25 +615,27 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - length += int(f.b & uint32(1<<(n®SizeMaskUint32)-1)) - f.b >>= n & regSizeMaskUint32 - f.nb -= n + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n default: if debugDecode { fmt.Println(v, ">= maxNumLit") } f.err = CorruptInputError(f.roffset) 
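Why inflate_gen.go keeps one near-identical decoder per concrete reader type (*bytes.Buffer, *bytes.Reader, *bufio.Reader, *strings.Reader), as these hunks show: a ReadByte call through a concrete type is a direct, inlinable call, while a call through an io.ByteReader interface is opaque to the inliner. A minimal sketch of the two shapes; the function names are mine:

package main

import (
	"bytes"
	"fmt"
	"io"
)

func sumDirect(fr *bytes.Reader) (n int) {
	for {
		c, err := fr.ReadByte() // direct call, can inline
		if err != nil {
			return n
		}
		n += int(c)
	}
}

func sumInterface(fr io.ByteReader) (n int) {
	for {
		c, err := fr.ReadByte() // interface call, not inlined
		if err != nil {
			return n
		}
		n += int(c)
	}
}

func main() {
	fmt.Println(sumDirect(bytes.NewReader([]byte{1, 2, 3})))    // 6
	fmt.Println(sumInterface(bytes.NewReader([]byte{1, 2, 3}))) // 6
}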
+ f.b, f.nb = fb, fnb
 return
 }

 var dist uint32
 if f.hd == nil {
- for f.nb < 5 {
+ for fnb < 5 {
 c, err := fr.ReadByte()
 if err != nil {
+ f.b, f.nb = fb, fnb
 if debugDecode {
 fmt.Println("morebits f.nb<5:", err)
 }
@@ -625,12 +643,12 @@ readLiteral:
 return
 }
 f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
 }
- dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3)))
- f.b >>= 5
- f.nb -= 5
+ dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+ fb >>= 5
+ fnb -= 5
 } else {
 // Since a huffmanDecoder can be empty or be composed of a degenerate tree
 // with single element, huffSym must error on these two edge cases. In both
@@ -640,38 +658,35 @@ readLiteral:
 // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
 // but is smart enough to keep local variables in registers, so use nb and b,
 // inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b
 for {
- for nb < n {
+ for fnb < n {
 c, err := fr.ReadByte()
 if err != nil {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
 f.err = noEOF(err)
 return
 }
 f.roffset++
- b |= uint32(c) << (nb & regSizeMaskUint32)
- nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
 }
- chunk := f.hd.chunks[b&(huffmanNumChunks-1)]
+ chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
 n = uint(chunk & huffmanCountMask)
 if n > huffmanChunkBits {
- chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask]
+ chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
 n = uint(chunk & huffmanCountMask)
 }
- if n <= nb {
+ if n <= fnb {
 if n == 0 {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
 if debugDecode {
 fmt.Println("huffsym: n==0")
 }
 f.err = CorruptInputError(f.roffset)
 return
 }
- f.b = b >> (n & regSizeMaskUint32)
- f.nb = nb - n
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
 dist = uint32(chunk >> huffmanValueShift)
 break
 }
@@ -685,9 +700,10 @@ readLiteral:
 nb := uint(dist-2) >> 1
 // have 1 bit in bottom of dist, need nb more.
 extra := (dist & 1) << (nb & regSizeMaskUint32)
- for f.nb < nb {
+ for fnb < nb {
 c, err := fr.ReadByte()
 if err != nil {
+ f.b, f.nb = fb, fnb
 if debugDecode {
 fmt.Println("morebits f.nb<nb:", err)
 }
 f.err = err
 return
 }
 f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
 }
- extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1)
- f.b >>= nb & regSizeMaskUint32
- f.nb -= nb
+ extra |= fb & bitMask32[nb]
+ fb >>= nb & regSizeMaskUint32
+ fnb -= nb
 dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+ // slower: dist = bitMask32[nb+1] + 2 + extra
 default:
+ f.b, f.nb = fb, fnb
 if debugDecode {
 fmt.Println("dist too big:", dist, maxNumDist)
 }
@@ -711,9 +729,10 @@ readLiteral:
 }

 // No check on length; encoding can be prescient.
- if dist > uint32(f.dict.histSize()) {
+ if dist > uint32(dict.histSize()) {
+ f.b, f.nb = fb, fnb
 if debugDecode {
- fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
+ fmt.Println("dist > dict.histSize():", dist, dict.histSize())
 }
 f.err = CorruptInputError(f.roffset)
 return
@@ -726,20 +745,22 @@ readLiteral:

copyHistory:
 // Perform a backwards copy according to RFC section 3.2.3.
{ - cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) + cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) if cnt == 0 { - cnt = f.dict.writeCopy(f.copyDist, f.copyLen) + cnt = dict.writeCopy(f.copyDist, f.copyLen) } f.copyLen -= cnt - if f.dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = f.dict.readFlush() + if dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = dict.readFlush() f.step = (*decompressor).huffmanBufioReader // We need to continue this work f.stepState = stateDict + f.b, f.nb = fb, fnb return } goto readLiteral } + // Not reached } // Decode a single Huffman block from f. @@ -753,6 +774,11 @@ func (f *decompressor) huffmanStringsReader() { ) fr := f.r.(*strings.Reader) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + fnb, fb, dict := f.nb, f.b, &f.dict + switch f.stepState { case stateInit: goto readLiteral @@ -771,41 +797,286 @@ readLiteral: // cases, the chunks slice will be 0 for the invalid sequence, leading it // satisfy the n == 0 check below. n := uint(f.hl.maxRead) + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var length int + switch { + case v < 256: + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() + f.step = (*decompressor).huffmanStringsReader + f.stepState = stateInit + f.b, f.nb = fb, fnb + return + } + goto readLiteral + case v == 256: + f.b, f.nb = fb, fnb + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + case v < maxNumLit: + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := uint(val.extra) + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb + return + } + + var dist uint32 + if f.hd == nil { + for fnb < 5 { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 + } else { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. 
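All of the inlined huffSym loops in this patch share one two-level lookup: the low bits of the bit buffer index a first-level chunks table, and codes longer than huffmanChunkBits chase a second-level links table. A standalone sketch, assuming the huffmanDecoder fields used in the surrounding code (decodeSym itself is illustrative, not part of the patch):

// decodeSym: a chunk packs the code width in its low bits (huffmanCountMask)
// and the symbol or link index above huffmanValueShift.
func decodeSym(h *huffmanDecoder, fb uint32, fnb uint) (sym int, width uint, ok bool) {
	chunk := h.chunks[fb&(huffmanNumChunks-1)]
	n := uint(chunk & huffmanCountMask)
	if n > huffmanChunkBits {
		// Long code: the next bits select an entry in a link table.
		chunk = h.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&h.linkMask]
		n = uint(chunk & huffmanCountMask)
	}
	if n == 0 || n > fnb {
		return 0, 0, false // invalid code, or not enough bits buffered yet
	}
	return int(chunk >> huffmanValueShift), n, true
}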
+ n := uint(f.hd.maxRead)
 // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
 // but is smart enough to keep local variables in registers, so use nb and b,
 // inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b
 for {
- for nb < n {
+ for fnb < n {
 c, err := fr.ReadByte()
 if err != nil {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
 f.err = noEOF(err)
 return
 }
 f.roffset++
- b |= uint32(c) << (nb & regSizeMaskUint32)
- nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
 }
- chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
+ chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
 n = uint(chunk & huffmanCountMask)
 if n > huffmanChunkBits {
- chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
+ chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
 n = uint(chunk & huffmanCountMask)
 }
- if n <= nb {
+ if n <= fnb {
 if n == 0 {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
 if debugDecode {
 fmt.Println("huffsym: n==0")
 }
 f.err = CorruptInputError(f.roffset)
 return
 }
- f.b = b >> (n & regSizeMaskUint32)
- f.nb = nb - n
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
+ dist = uint32(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ switch {
+ case dist < 4:
+ dist++
+ case dist < maxNumDist:
+ nb := uint(dist-2) >> 1
+ // have 1 bit in bottom of dist, need nb more.
+ extra := (dist & 1) << (nb & regSizeMaskUint32)
+ for fnb < nb {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits f.nb<nb:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ extra |= fb & bitMask32[nb]
+ fb >>= nb & regSizeMaskUint32
+ fnb -= nb
+ dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+ // slower: dist = bitMask32[nb+1] + 2 + extra
+ default:
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist too big:", dist, maxNumDist)
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ // No check on length; encoding can be prescient.
+ if dist > uint32(dict.histSize()) {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ f.copyLen, f.copyDist = length, int(dist)
+ goto copyHistory
+ }
+
+copyHistory:
+ // Perform a backwards copy according to RFC section 3.2.3.
+ {
+ cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+ if cnt == 0 {
+ cnt = dict.writeCopy(f.copyDist, f.copyLen)
+ }
+ f.copyLen -= cnt
+
+ if dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = dict.readFlush()
+ f.step = (*decompressor).huffmanStringsReader // We need to continue this work
+ f.stepState = stateDict
+ f.b, f.nb = fb, fnb
+ return
+ }
+ goto readLiteral
+ }
+ // Not reached
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanGenericReader() {
+ const (
+ stateInit = iota // Zero value must be stateInit
+ stateDict
+ )
+ fr := f.r.(Reader)
+
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ fnb, fb, dict := f.nb, f.b, &f.dict
+
+ switch f.stepState {
+ case stateInit:
+ goto readLiteral
+ case stateDict:
+ goto copyHistory
+ }
+
+readLiteral:
+ // Read literal and/or (length, distance) according to RFC section 3.2.3.
+ {
+ var v int
+ {
+ // Inlined v, err := f.huffSym(f.hl)
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead)
+ for {
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= fnb {
+ if n == 0 {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
 v = int(chunk >> huffmanValueShift)
 break
 }
@@ -815,15 +1086,17 @@ readLiteral:
 var length int
 switch {
 case v < 256:
- f.dict.writeByte(byte(v))
- if f.dict.availWrite() == 0 {
- f.toRead = f.dict.readFlush()
- f.step = (*decompressor).huffmanStringsReader
+ dict.writeByte(byte(v))
+ if dict.availWrite() == 0 {
+ f.toRead = dict.readFlush()
+ f.step = (*decompressor).huffmanGenericReader
 f.stepState = stateInit
+ f.b, f.nb = fb, fnb
 return
 }
 goto readLiteral
 case v == 256:
+ f.b, f.nb = fb, fnb
 f.finishBlock()
 return
 // otherwise, reference to older data
@@ -833,9 +1106,10 @@ readLiteral:
 val := decCodeToLen[(v - 257)]
 length = int(val.length) + 3
 n := uint(val.extra)
- for f.nb < n {
+ for fnb < n {
 c, err := fr.ReadByte()
 if err != nil {
+ f.b, f.nb = fb, fnb
 if debugDecode {
 fmt.Println("morebits n>0:", err)
 }
@@ -843,25 +1117,27 @@ readLiteral:
 return
 }
 f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
 }
- length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1))
- f.b >>= n & regSizeMaskUint32
- f.nb -= n
+ length += int(fb & bitMask32[n])
+ fb >>= n & regSizeMaskUint32
+ fnb -= n
 default:
 if debugDecode {
 fmt.Println(v, ">= maxNumLit")
 }
 f.err = CorruptInputError(f.roffset)
+ f.b, f.nb = fb, fnb
 return
 }

 var dist uint32
 if f.hd == nil {
- for f.nb < 5 {
+ for fnb < 5 {
 c, err := fr.ReadByte()
 if err != nil {
+ f.b, f.nb = fb, fnb
 if debugDecode {
 fmt.Println("morebits f.nb<5:", err)
 }
@@ -869,12 +1145,12 @@ readLiteral:
 return
 }
 f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
 }
- dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3)))
- f.b >>= 5
- f.nb -= 5
+ dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+ fb >>= 5
+ fnb -= 5
 } else {
 // Since a huffmanDecoder can be empty or be composed of a degenerate tree
 // with single element, huffSym must error on these two edge cases. In both
@@ -884,38 +1160,35 @@ readLiteral:
 // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
 // but is smart enough to keep local variables in registers, so use nb and b,
 // inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b
 for {
- for nb < n {
+ for fnb < n {
 c, err := fr.ReadByte()
 if err != nil {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
 f.err = noEOF(err)
 return
 }
 f.roffset++
- b |= uint32(c) << (nb & regSizeMaskUint32)
- nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
 }
- chunk := f.hd.chunks[b&(huffmanNumChunks-1)]
+ chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
 n = uint(chunk & huffmanCountMask)
 if n > huffmanChunkBits {
- chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask]
+ chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
 n = uint(chunk & huffmanCountMask)
 }
- if n <= nb {
+ if n <= fnb {
 if n == 0 {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
 if debugDecode {
 fmt.Println("huffsym: n==0")
 }
 f.err = CorruptInputError(f.roffset)
 return
 }
- f.b = b >> (n & regSizeMaskUint32)
- f.nb = nb - n
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
 dist = uint32(chunk >> huffmanValueShift)
 break
 }
@@ -929,9 +1202,10 @@ readLiteral:
 nb := uint(dist-2) >> 1
 // have 1 bit in bottom of dist, need nb more.
 extra := (dist & 1) << (nb & regSizeMaskUint32)
- for f.nb < nb {
+ for fnb < nb {
 c, err := fr.ReadByte()
 if err != nil {
+ f.b, f.nb = fb, fnb
 if debugDecode {
 fmt.Println("morebits f.nb<nb:", err)
 }
 f.err = err
 return
 }
 f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
 }
- extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1)
- f.b >>= nb & regSizeMaskUint32
- f.nb -= nb
+ extra |= fb & bitMask32[nb]
+ fb >>= nb & regSizeMaskUint32
+ fnb -= nb
 dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+ // slower: dist = bitMask32[nb+1] + 2 + extra
 default:
+ f.b, f.nb = fb, fnb
 if debugDecode {
 fmt.Println("dist too big:", dist, maxNumDist)
 }
@@ -955,9 +1231,10 @@ readLiteral:
 }

 // No check on length; encoding can be prescient.
- if dist > uint32(f.dict.histSize()) {
+ if dist > uint32(dict.histSize()) {
+ f.b, f.nb = fb, fnb
 if debugDecode {
- fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
+ fmt.Println("dist > dict.histSize():", dist, dict.histSize())
 }
 f.err = CorruptInputError(f.roffset)
 return
@@ -970,20 +1247,22 @@ readLiteral:

copyHistory:
 // Perform a backwards copy according to RFC section 3.2.3.
 {
- cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
+ cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
 if cnt == 0 {
- cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
+ cnt = dict.writeCopy(f.copyDist, f.copyLen)
 }
 f.copyLen -= cnt

- if f.dict.availWrite() == 0 || f.copyLen > 0 {
- f.toRead = f.dict.readFlush()
- f.step = (*decompressor).huffmanStringsReader // We need to continue this work
+ if dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = dict.readFlush()
+ f.step = (*decompressor).huffmanGenericReader // We need to continue this work
 f.stepState = stateDict
+ f.b, f.nb = fb, fnb
 return
 }
 goto readLiteral
 }
+ // Not reached
}

 func (f *decompressor) huffmanBlockDecoder() func() {
@@ -996,7 +1275,9 @@ func (f *decompressor) huffmanBlockDecoder() func() {
 return f.huffmanBufioReader
 case *strings.Reader:
 return f.huffmanStringsReader
+ case Reader:
+ return f.huffmanGenericReader
 default:
- return f.huffmanBlockGeneric
+ return f.huffmanGenericReader
 }
}
diff --git a/vendor/github.com/klauspost/compress/flate/level1.go b/vendor/github.com/klauspost/compress/flate/level1.go
index 1e5eea396..703b9a89a 100644
--- a/vendor/github.com/klauspost/compress/flate/level1.go
+++ b/vendor/github.com/klauspost/compress/flate/level1.go
@@ -1,6 +1,10 @@
 package flate

-import "fmt"
+import (
+ "encoding/binary"
+ "fmt"
+ "math/bits"
+)

 // fastGen maintains the table for matches,
 // and the previous byte block for level 2.
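The comment repeated through the inflate readers above captures a general discipline rather than anything flate-specific: copy hot struct fields into locals on entry (the Go compiler register-allocates locals but not field accesses), and reassign them back on every exit path. A minimal sketch with a hypothetical type:

type bitState struct {
	b  uint32
	nb uint
}

// refill sketches the locals-in-registers pattern: fb/fnb stay in registers
// inside the loop, and are written back to the struct on return.
func (d *bitState) refill(in []byte) {
	fb, fnb := d.b, d.nb
	for _, c := range in {
		fb |= uint32(c) << (fnb & 31)
		fnb += 8
	}
	d.b, d.nb = fb, fnb
}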
@@ -15,6 +19,7 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
 const (
 inputMargin = 12 - 1
 minNonLiteralBlockSize = 1 + 1 + inputMargin
+ hashBytes = 5
 )
 if debugDeflate && e.cur < 0 {
 panic(fmt.Sprint("e.cur < 0: ", e.cur))
@@ -64,7 +69,7 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
 sLimit := int32(len(src) - inputMargin)

 // nextEmit is where in src the next emitLiteral should start from.
- cv := load3232(src, s)
+ cv := load6432(src, s)

 for {
 const skipLog = 5
@@ -73,7 +78,7 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
 nextS := s
 var candidate tableEntry
 for {
- nextHash := hash(cv)
+ nextHash := hashLen(cv, tableBits, hashBytes)
 candidate = e.table[nextHash]
 nextS = s + doEvery + (s-nextEmit)>>skipLog
 if nextS > sLimit {
@@ -82,16 +87,16 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
 now := load6432(src, nextS)
 e.table[nextHash] = tableEntry{offset: s + e.cur}
- nextHash = hash(uint32(now))
+ nextHash = hashLen(now, tableBits, hashBytes)

 offset := s - (candidate.offset - e.cur)
- if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) {
+ if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
 e.table[nextHash] = tableEntry{offset: nextS + e.cur}
 break
 }

 // Do one right away...
- cv = uint32(now)
+ cv = now
 s = nextS
 nextS++
 candidate = e.table[nextHash]
@@ -99,11 +104,11 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
 e.table[nextHash] = tableEntry{offset: s + e.cur}

 offset = s - (candidate.offset - e.cur)
- if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) {
+ if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
 e.table[nextHash] = tableEntry{offset: nextS + e.cur}
 break
 }
- cv = uint32(now)
+ cv = now
 s = nextS
 }

@@ -116,7 +121,32 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {

 // Extend the 4-byte match as long as possible.
 t := candidate.offset - e.cur
- l := e.matchlenLong(s+4, t+4, src) + 4
+ var l = int32(4)
+ if false {
+ l = e.matchlenLong(s+4, t+4, src) + 4
+ } else {
+ // inlined:
+ a := src[s+4:]
+ b := src[t+4:]
+ for len(a) >= 8 {
+ if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
+ l += int32(bits.TrailingZeros64(diff) >> 3)
+ break
+ }
+ l += 8
+ a = a[8:]
+ b = b[8:]
+ }
+ if len(a) < 8 {
+ b = b[:len(a)]
+ for i := range a {
+ if a[i] != b[i] {
+ break
+ }
+ l++
+ }
+ }
+ }

 // Extend backwards
 for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
@@ -125,11 +155,43 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
 l++
 }
 if nextEmit < s {
- emitLiteral(dst, src[nextEmit:s])
+ if false {
+ emitLiteral(dst, src[nextEmit:s])
+ } else {
+ for _, v := range src[nextEmit:s] {
+ dst.tokens[dst.n] = token(v)
+ dst.litHist[v]++
+ dst.n++
+ }
+ }
 }

 // Save the match found
- dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ if false {
+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ } else {
+ // Inlined...
+ xoffset := uint32(s - t - baseMatchOffset)
+ xlength := l
+ oc := offsetCode(xoffset)
+ xoffset |= oc << 16
+ for xlength > 0 {
+ xl := xlength
+ if xl > 258 {
+ if xl > 258+baseMatchLength {
+ xl = 258
+ } else {
+ xl = 258 - baseMatchLength
+ }
+ }
+ xlength -= xl
+ xl -= baseMatchLength
+ dst.extraHist[lengthCodes1[uint8(xl)]]++
+ dst.offHist[oc]++
+ dst.tokens[dst.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
+ dst.n++
+ }
+ }
 s += l
 nextEmit = s
 if nextS >= s {
@@ -137,9 +199,9 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
 }

 if s >= sLimit {
 // Index first pair after match end.
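The `if false { ... } else { ... }` block above inlines matchlenLong: words are compared 8 bytes at a time, and on the first mismatch the XOR of the two words has its lowest set bit inside the first differing byte, so TrailingZeros64 divided by 8 yields the extra matching length. The same idea as a standalone helper (a sketch, not the library's function):

import (
	"encoding/binary"
	"math/bits"
)

// matchLen counts the common prefix of a and b, eight bytes at a time.
func matchLen(a, b []byte) (l int32) {
	for len(a) >= 8 && len(b) >= 8 {
		if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
			// First mismatching byte = lowest non-zero byte of diff.
			return l + int32(bits.TrailingZeros64(diff)>>3)
		}
		l += 8
		a, b = a[8:], b[8:]
	}
	for i := 0; i < len(a) && i < len(b); i++ {
		if a[i] != b[i] {
			break
		}
		l++
	}
	return l
}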
- if int(s+l+4) < len(src) { - cv := load3232(src, s) - e.table[hash(cv)] = tableEntry{offset: s + e.cur} + if int(s+l+8) < len(src) { + cv := load6432(src, s) + e.table[hashLen(cv, tableBits, hashBytes)] = tableEntry{offset: s + e.cur} } goto emitRemainder } @@ -152,16 +214,16 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { // three load32 calls. x := load6432(src, s-2) o := e.cur + s - 2 - prevHash := hash(uint32(x)) + prevHash := hashLen(x, tableBits, hashBytes) e.table[prevHash] = tableEntry{offset: o} x >>= 16 - currHash := hash(uint32(x)) + currHash := hashLen(x, tableBits, hashBytes) candidate = e.table[currHash] e.table[currHash] = tableEntry{offset: o + 2} offset := s - (candidate.offset - e.cur) if offset > maxMatchOffset || uint32(x) != load3232(src, candidate.offset-e.cur) { - cv = uint32(x >> 8) + cv = x >> 8 s++ break } diff --git a/vendor/github.com/klauspost/compress/flate/level2.go b/vendor/github.com/klauspost/compress/flate/level2.go index 234c4389a..876dfbe30 100644 --- a/vendor/github.com/klauspost/compress/flate/level2.go +++ b/vendor/github.com/klauspost/compress/flate/level2.go @@ -16,6 +16,7 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { const ( inputMargin = 12 - 1 minNonLiteralBlockSize = 1 + 1 + inputMargin + hashBytes = 5 ) if debugDeflate && e.cur < 0 { @@ -66,7 +67,7 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { sLimit := int32(len(src) - inputMargin) // nextEmit is where in src the next emitLiteral should start from. - cv := load3232(src, s) + cv := load6432(src, s) for { // When should we start skipping if we haven't found matches in a long while. const skipLog = 5 @@ -75,7 +76,7 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { nextS := s var candidate tableEntry for { - nextHash := hash4u(cv, bTableBits) + nextHash := hashLen(cv, bTableBits, hashBytes) s = nextS nextS = s + doEvery + (s-nextEmit)>>skipLog if nextS > sLimit { @@ -84,16 +85,16 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { candidate = e.table[nextHash] now := load6432(src, nextS) e.table[nextHash] = tableEntry{offset: s + e.cur} - nextHash = hash4u(uint32(now), bTableBits) + nextHash = hashLen(now, bTableBits, hashBytes) offset := s - (candidate.offset - e.cur) - if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) { + if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { e.table[nextHash] = tableEntry{offset: nextS + e.cur} break } // Do one right away... - cv = uint32(now) + cv = now s = nextS nextS++ candidate = e.table[nextHash] @@ -101,10 +102,10 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { e.table[nextHash] = tableEntry{offset: s + e.cur} offset = s - (candidate.offset - e.cur) - if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) { + if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { break } - cv = uint32(now) + cv = now } // A 4-byte match has been found. We'll later see if more than 4 bytes @@ -134,7 +135,15 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) @@ -146,9 +155,9 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { if s >= sLimit { // Index first pair after match end. 
- if int(s+l+4) < len(src) { - cv := load3232(src, s) - e.table[hash4u(cv, bTableBits)] = tableEntry{offset: s + e.cur} + if int(s+l+8) < len(src) { + cv := load6432(src, s) + e.table[hashLen(cv, bTableBits, hashBytes)] = tableEntry{offset: s + e.cur} } goto emitRemainder } @@ -156,15 +165,15 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { // Store every second hash in-between, but offset by 1. for i := s - l + 2; i < s-5; i += 7 { x := load6432(src, i) - nextHash := hash4u(uint32(x), bTableBits) + nextHash := hashLen(x, bTableBits, hashBytes) e.table[nextHash] = tableEntry{offset: e.cur + i} // Skip one x >>= 16 - nextHash = hash4u(uint32(x), bTableBits) + nextHash = hashLen(x, bTableBits, hashBytes) e.table[nextHash] = tableEntry{offset: e.cur + i + 2} // Skip one x >>= 16 - nextHash = hash4u(uint32(x), bTableBits) + nextHash = hashLen(x, bTableBits, hashBytes) e.table[nextHash] = tableEntry{offset: e.cur + i + 4} } @@ -176,17 +185,17 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { // three load32 calls. x := load6432(src, s-2) o := e.cur + s - 2 - prevHash := hash4u(uint32(x), bTableBits) - prevHash2 := hash4u(uint32(x>>8), bTableBits) + prevHash := hashLen(x, bTableBits, hashBytes) + prevHash2 := hashLen(x>>8, bTableBits, hashBytes) e.table[prevHash] = tableEntry{offset: o} e.table[prevHash2] = tableEntry{offset: o + 1} - currHash := hash4u(uint32(x>>16), bTableBits) + currHash := hashLen(x>>16, bTableBits, hashBytes) candidate = e.table[currHash] e.table[currHash] = tableEntry{offset: o + 2} offset := s - (candidate.offset - e.cur) if offset > maxMatchOffset || uint32(x>>16) != load3232(src, candidate.offset-e.cur) { - cv = uint32(x >> 24) + cv = x >> 24 s++ break } diff --git a/vendor/github.com/klauspost/compress/flate/level3.go b/vendor/github.com/klauspost/compress/flate/level3.go index c22b4244a..7aa2b72a1 100644 --- a/vendor/github.com/klauspost/compress/flate/level3.go +++ b/vendor/github.com/klauspost/compress/flate/level3.go @@ -5,14 +5,17 @@ import "fmt" // fastEncL3 type fastEncL3 struct { fastGen - table [tableSize]tableEntryPrev + table [1 << 16]tableEntryPrev } // Encode uses a similar algorithm to level 2, will check up to two candidates. func (e *fastEncL3) Encode(dst *tokens, src []byte) { const ( - inputMargin = 8 - 1 + inputMargin = 12 - 1 minNonLiteralBlockSize = 1 + 1 + inputMargin + tableBits = 16 + tableSize = 1 << tableBits + hashBytes = 5 ) if debugDeflate && e.cur < 0 { @@ -67,20 +70,20 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { sLimit := int32(len(src) - inputMargin) // nextEmit is where in src the next emitLiteral should start from. - cv := load3232(src, s) + cv := load6432(src, s) for { - const skipLog = 6 + const skipLog = 7 nextS := s var candidate tableEntry for { - nextHash := hash(cv) + nextHash := hashLen(cv, tableBits, hashBytes) s = nextS nextS = s + 1 + (s-nextEmit)>>skipLog if nextS > sLimit { goto emitRemainder } candidates := e.table[nextHash] - now := load3232(src, nextS) + now := load6432(src, nextS) // Safe offset distance until s + 4... 
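Level 3 now keys a full 16-bit table of two-entry chains: each bucket remembers both the current and the previous candidate, so a fresh insertion does not immediately evict a still-useful match. The update rule used throughout the function, isolated as a sketch:

type tableEntry struct{ offset int32 }

type tableEntryPrev struct {
	Cur  tableEntry
	Prev tableEntry
}

// insert demotes the old current candidate instead of dropping it.
func insert(table []tableEntryPrev, nextHash uint32, off int32) {
	table[nextHash] = tableEntryPrev{
		Prev: table[nextHash].Cur,
		Cur:  tableEntry{offset: off},
	}
}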
minOffset := e.cur + s - (maxMatchOffset - 4) @@ -94,8 +97,8 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { continue } - if cv == load3232(src, candidate.offset-e.cur) { - if candidates.Prev.offset < minOffset || cv != load3232(src, candidates.Prev.offset-e.cur) { + if uint32(cv) == load3232(src, candidate.offset-e.cur) { + if candidates.Prev.offset < minOffset || uint32(cv) != load3232(src, candidates.Prev.offset-e.cur) { break } // Both match and are valid, pick longest. @@ -110,7 +113,7 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { // We only check if value mismatches. // Offset will always be invalid in other cases. candidate = candidates.Prev - if candidate.offset > minOffset && cv == load3232(src, candidate.offset-e.cur) { + if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { break } } @@ -141,7 +144,15 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) @@ -154,9 +165,9 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { if s >= sLimit { t += l // Index first pair after match end. - if int(t+4) < len(src) && t > 0 { - cv := load3232(src, t) - nextHash := hash(cv) + if int(t+8) < len(src) && t > 0 { + cv = load6432(src, t) + nextHash := hashLen(cv, tableBits, hashBytes) e.table[nextHash] = tableEntryPrev{ Prev: e.table[nextHash].Cur, Cur: tableEntry{offset: e.cur + t}, @@ -165,32 +176,33 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { goto emitRemainder } - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-3 to s. - x := load6432(src, s-3) - prevHash := hash(uint32(x)) - e.table[prevHash] = tableEntryPrev{ - Prev: e.table[prevHash].Cur, - Cur: tableEntry{offset: e.cur + s - 3}, + // Store every 5th hash in-between. + for i := s - l + 2; i < s-5; i += 6 { + nextHash := hashLen(load6432(src, i), tableBits, hashBytes) + e.table[nextHash] = tableEntryPrev{ + Prev: e.table[nextHash].Cur, + Cur: tableEntry{offset: e.cur + i}} } - x >>= 8 - prevHash = hash(uint32(x)) + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-2 to s. + x := load6432(src, s-2) + prevHash := hashLen(x, tableBits, hashBytes) e.table[prevHash] = tableEntryPrev{ Prev: e.table[prevHash].Cur, Cur: tableEntry{offset: e.cur + s - 2}, } x >>= 8 - prevHash = hash(uint32(x)) + prevHash = hashLen(x, tableBits, hashBytes) e.table[prevHash] = tableEntryPrev{ Prev: e.table[prevHash].Cur, Cur: tableEntry{offset: e.cur + s - 1}, } x >>= 8 - currHash := hash(uint32(x)) + currHash := hashLen(x, tableBits, hashBytes) candidates := e.table[currHash] - cv = uint32(x) + cv = x e.table[currHash] = tableEntryPrev{ Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur}, @@ -200,18 +212,18 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { candidate = candidates.Cur minOffset := e.cur + s - (maxMatchOffset - 4) - if candidate.offset > minOffset && cv != load3232(src, candidate.offset-e.cur) { - // We only check if value mismatches. - // Offset will always be invalid in other cases. + if candidate.offset > minOffset { + if uint32(cv) == load3232(src, candidate.offset-e.cur) { + // Found a match... 
+ continue + } candidate = candidates.Prev - if candidate.offset > minOffset && cv == load3232(src, candidate.offset-e.cur) { - offset := s - (candidate.offset - e.cur) - if offset <= maxMatchOffset { - continue - } + if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { + // Match at prev... + continue } } - cv = uint32(x >> 8) + cv = x >> 8 s++ break } diff --git a/vendor/github.com/klauspost/compress/flate/level4.go b/vendor/github.com/klauspost/compress/flate/level4.go index e62f0c02b..23c08b325 100644 --- a/vendor/github.com/klauspost/compress/flate/level4.go +++ b/vendor/github.com/klauspost/compress/flate/level4.go @@ -12,6 +12,7 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { const ( inputMargin = 12 - 1 minNonLiteralBlockSize = 1 + 1 + inputMargin + hashShortBytes = 4 ) if debugDeflate && e.cur < 0 { panic(fmt.Sprint("e.cur < 0: ", e.cur)) @@ -80,7 +81,7 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { nextS := s var t int32 for { - nextHashS := hash4x64(cv, tableBits) + nextHashS := hashLen(cv, tableBits, hashShortBytes) nextHashL := hash7(cv, tableBits) s = nextS @@ -135,7 +136,15 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } if debugDeflate { if t >= s { @@ -160,7 +169,7 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { // Index first pair after match end. if int(s+8) < len(src) { cv := load6432(src, s) - e.table[hash4x64(cv, tableBits)] = tableEntry{offset: s + e.cur} + e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: s + e.cur} e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur} } goto emitRemainder @@ -175,7 +184,7 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { t2 := tableEntry{offset: t.offset + 1} e.bTable[hash7(cv, tableBits)] = t e.bTable[hash7(cv>>8, tableBits)] = t2 - e.table[hash4u(uint32(cv>>8), tableBits)] = t2 + e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 i += 3 for ; i < s-1; i += 3 { @@ -184,7 +193,7 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { t2 := tableEntry{offset: t.offset + 1} e.bTable[hash7(cv, tableBits)] = t e.bTable[hash7(cv>>8, tableBits)] = t2 - e.table[hash4u(uint32(cv>>8), tableBits)] = t2 + e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 } } } @@ -193,7 +202,7 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { // compression we first update the hash table at s-1 and at s. 
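hashLen(cv, bits, bytes), which replaces the fixed hash/hash4u/hash4x64 helpers at every level, hashes the low `bytes` bytes of a 64-bit load into a `bits`-wide table index. Its body is not part of this hunk; a typical multiply-shift version, with an illustrative prime constant, looks like:

const prime8 = 0xcf1bbcdcb7a56463 // illustrative constant, not from this patch

// hashSketch keeps only the low mls bytes of u by shifting them to the top,
// multiplies, and takes the top `length` bits of the product as the index.
func hashSketch(u uint64, length, mls uint8) uint32 {
	u <<= 64 - 8*uint64(mls)
	return uint32((u * prime8) >> (64 - uint64(length)))
}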
x := load6432(src, s-1) o := e.cur + s - 1 - prevHashS := hash4x64(x, tableBits) + prevHashS := hashLen(x, tableBits, hashShortBytes) prevHashL := hash7(x, tableBits) e.table[prevHashS] = tableEntry{offset: o} e.bTable[prevHashL] = tableEntry{offset: o} diff --git a/vendor/github.com/klauspost/compress/flate/level5.go b/vendor/github.com/klauspost/compress/flate/level5.go index 293a3a320..83ef50ba4 100644 --- a/vendor/github.com/klauspost/compress/flate/level5.go +++ b/vendor/github.com/klauspost/compress/flate/level5.go @@ -12,6 +12,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { const ( inputMargin = 12 - 1 minNonLiteralBlockSize = 1 + 1 + inputMargin + hashShortBytes = 4 ) if debugDeflate && e.cur < 0 { panic(fmt.Sprint("e.cur < 0: ", e.cur)) @@ -88,7 +89,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { var l int32 var t int32 for { - nextHashS := hash4x64(cv, tableBits) + nextHashS := hashLen(cv, tableBits, hashShortBytes) nextHashL := hash7(cv, tableBits) s = nextS @@ -105,7 +106,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { eLong := &e.bTable[nextHashL] eLong.Cur, eLong.Prev = entry, eLong.Cur - nextHashS = hash4x64(next, tableBits) + nextHashS = hashLen(next, tableBits, hashShortBytes) nextHashL = hash7(next, tableBits) t = lCandidate.Cur.offset - e.cur @@ -191,14 +192,21 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { // Try to locate a better match by checking the end of best match... if sAt := s + l; l < 30 && sAt < sLimit { + // Allow some bytes at the beginning to mismatch. + // Sweet spot is 2/3 bytes depending on input. + // 3 is only a little better when it is but sometimes a lot worse. + // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. 
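The skipBeginning idea in the level 5/6 hunks, spelled out: the end-of-match probe finds a candidate whose tail matches at sAt = s+l; by shifting both positions forward a couple of bytes (s2/t2), the first bytes of the window are allowed to disagree and a longer tail match can still win, while the "Extend backwards" loop afterwards re-adds any skipped bytes that do in fact match. The pattern, isolated from the code above (names as in the surrounding function):

const skipBeginning = 2 // sweet spot per the comment above

t2 := eLong - e.cur - l + skipBeginning // candidate start, shifted forward
s2 := s + skipBeginning
if off := s2 - t2; t2 >= 0 && off < maxMatchOffset && off > 0 {
	if l2 := e.matchlenLong(s2, t2, src); l2 > l {
		t, l, s = t2, l2, s2 // adopt the longer, later-starting match
	}
}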
+ const skipBeginning = 2 eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset - // Test current - t2 := eLong - e.cur - l - off := s - t2 + t2 := eLong - e.cur - l + skipBeginning + s2 := s + skipBeginning + off := s2 - t2 if t2 >= 0 && off < maxMatchOffset && off > 0 { - if l2 := e.matchlenLong(s, t2, src); l2 > l { + if l2 := e.matchlenLong(s2, t2, src); l2 > l { t = t2 l = l2 + s = s2 } } } @@ -210,7 +218,15 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } if debugDeflate { if t >= s { @@ -242,7 +258,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { if i < s-1 { cv := load6432(src, i) t := tableEntry{offset: i + e.cur} - e.table[hash4x64(cv, tableBits)] = t + e.table[hashLen(cv, tableBits, hashShortBytes)] = t eLong := &e.bTable[hash7(cv, tableBits)] eLong.Cur, eLong.Prev = t, eLong.Cur @@ -255,7 +271,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { // We only have enough bits for a short entry at i+2 cv >>= 8 t = tableEntry{offset: t.offset + 1} - e.table[hash4x64(cv, tableBits)] = t + e.table[hashLen(cv, tableBits, hashShortBytes)] = t // Skip one - otherwise we risk hitting 's' i += 4 @@ -265,7 +281,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { t2 := tableEntry{offset: t.offset + 1} eLong := &e.bTable[hash7(cv, tableBits)] eLong.Cur, eLong.Prev = t, eLong.Cur - e.table[hash4u(uint32(cv>>8), tableBits)] = t2 + e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 } } } @@ -274,7 +290,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { // compression we first update the hash table at s-1 and at s. x := load6432(src, s-1) o := e.cur + s - 1 - prevHashS := hash4x64(x, tableBits) + prevHashS := hashLen(x, tableBits, hashShortBytes) prevHashL := hash7(x, tableBits) e.table[prevHashS] = tableEntry{offset: o} eLong := &e.bTable[prevHashL] diff --git a/vendor/github.com/klauspost/compress/flate/level6.go b/vendor/github.com/klauspost/compress/flate/level6.go index a709977ec..f1e9d98fa 100644 --- a/vendor/github.com/klauspost/compress/flate/level6.go +++ b/vendor/github.com/klauspost/compress/flate/level6.go @@ -12,6 +12,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { const ( inputMargin = 12 - 1 minNonLiteralBlockSize = 1 + 1 + inputMargin + hashShortBytes = 4 ) if debugDeflate && e.cur < 0 { panic(fmt.Sprint("e.cur < 0: ", e.cur)) @@ -90,7 +91,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { var l int32 var t int32 for { - nextHashS := hash4x64(cv, tableBits) + nextHashS := hashLen(cv, tableBits, hashShortBytes) nextHashL := hash7(cv, tableBits) s = nextS nextS = s + doEvery + (s-nextEmit)>>skipLog @@ -107,7 +108,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { eLong.Cur, eLong.Prev = entry, eLong.Cur // Calculate hashes of 'next' - nextHashS = hash4x64(next, tableBits) + nextHashS = hashLen(next, tableBits, hashShortBytes) nextHashL = hash7(next, tableBits) t = lCandidate.Cur.offset - e.cur @@ -213,24 +214,33 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { // Try to locate a better match by checking the end-of-match... if sAt := s + l; sAt < sLimit { + // Allow some bytes at the beginning to mismatch. + // Sweet spot is 2/3 bytes depending on input. + // 3 is only a little better when it is but sometimes a lot worse. 
+ // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. + const skipBeginning = 2 eLong := &e.bTable[hash7(load6432(src, sAt), tableBits)] // Test current - t2 := eLong.Cur.offset - e.cur - l - off := s - t2 + t2 := eLong.Cur.offset - e.cur - l + skipBeginning + s2 := s + skipBeginning + off := s2 - t2 if off < maxMatchOffset { if off > 0 && t2 >= 0 { - if l2 := e.matchlenLong(s, t2, src); l2 > l { + if l2 := e.matchlenLong(s2, t2, src); l2 > l { t = t2 l = l2 + s = s2 } } // Test next: - t2 = eLong.Prev.offset - e.cur - l - off := s - t2 + t2 = eLong.Prev.offset - e.cur - l + skipBeginning + off := s2 - t2 if off > 0 && off < maxMatchOffset && t2 >= 0 { - if l2 := e.matchlenLong(s, t2, src); l2 > l { + if l2 := e.matchlenLong(s2, t2, src); l2 > l { t = t2 l = l2 + s = s2 } } } @@ -243,7 +253,15 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } if false { if t >= s { @@ -269,7 +287,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { // Index after match end. for i := nextS + 1; i < int32(len(src))-8; i += 2 { cv := load6432(src, i) - e.table[hash4x64(cv, tableBits)] = tableEntry{offset: i + e.cur} + e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: i + e.cur} eLong := &e.bTable[hash7(cv, tableBits)] eLong.Cur, eLong.Prev = tableEntry{offset: i + e.cur}, eLong.Cur } @@ -284,7 +302,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { t2 := tableEntry{offset: t.offset + 1} eLong := &e.bTable[hash7(cv, tableBits)] eLong2 := &e.bTable[hash7(cv>>8, tableBits)] - e.table[hash4x64(cv, tableBits)] = t + e.table[hashLen(cv, tableBits, hashShortBytes)] = t eLong.Cur, eLong.Prev = t, eLong.Cur eLong2.Cur, eLong2.Prev = t2, eLong2.Cur } diff --git a/vendor/github.com/klauspost/compress/flate/regmask_other.go b/vendor/github.com/klauspost/compress/flate/regmask_other.go index f477a5d6e..1b7a2cbd7 100644 --- a/vendor/github.com/klauspost/compress/flate/regmask_other.go +++ b/vendor/github.com/klauspost/compress/flate/regmask_other.go @@ -1,4 +1,5 @@ -//+build !amd64 +//go:build !amd64 +// +build !amd64 package flate diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go index 53e899124..f3d4139ef 100644 --- a/vendor/github.com/klauspost/compress/flate/stateless.go +++ b/vendor/github.com/klauspost/compress/flate/stateless.go @@ -59,9 +59,9 @@ var bitWriterPool = sync.Pool{ }, } -// StatelessDeflate allows to compress directly to a Writer without retaining state. +// StatelessDeflate allows compressing directly to a Writer without retaining state. // When returning everything will be flushed. -// Up to 8KB of an optional dictionary can be given which is presumed to presumed to precede the block. +// Up to 8KB of an optional dictionary can be given which is presumed to precede the block. // Longer dictionaries will be truncated and will still produce valid output. // Sending nil dictionary is perfectly fine. func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error { @@ -86,11 +86,19 @@ func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error { dict = dict[len(dict)-maxStatelessDict:] } + // For subsequent loops, keep shallow dict reference to avoid alloc+copy. 
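The shallow-dictionary loop that follows avoids the old per-block alloc+copy of `dict = todo[...]`: `inDict` re-slices the original input, so the dictionary and the next block are already contiguous. Calling the stateless path itself is a one-liner; a usage sketch (the helper name and data are invented, the flate signature is the one in this file):

import (
	"bytes"

	"github.com/klauspost/compress/flate"
)

func compressOnce(data []byte) ([]byte, error) {
	var buf bytes.Buffer
	// eof=true emits the final block; a nil dict means no preset dictionary.
	if err := flate.StatelessDeflate(&buf, data, true, nil); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}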
+ var inDict []byte
+
 for len(in) > 0 {
 todo := in
- if len(todo) > maxStatelessBlock-len(dict) {
+ if len(inDict) > 0 {
+ if len(todo) > maxStatelessBlock-maxStatelessDict {
+ todo = todo[:maxStatelessBlock-maxStatelessDict]
+ }
+ } else if len(todo) > maxStatelessBlock-len(dict) {
 todo = todo[:maxStatelessBlock-len(dict)]
 }
+ inOrg := in
 in = in[len(todo):]
 uncompressed := todo
 if len(dict) > 0 {
@@ -102,7 +110,11 @@ func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error {
 todo = combined
 }
 // Compress
- statelessEnc(&dst, todo, int16(len(dict)))
+ if len(inDict) == 0 {
+ statelessEnc(&dst, todo, int16(len(dict)))
+ } else {
+ statelessEnc(&dst, inDict[:maxStatelessDict+len(todo)], maxStatelessDict)
+ }
 isEof := eof && len(in) == 0

 if dst.n == 0 {
@@ -119,7 +131,8 @@ func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error {
 }
 if len(in) > 0 {
 // Retain a dict if we have more
- dict = todo[len(todo)-maxStatelessDict:]
+ inDict = inOrg[len(uncompressed)-maxStatelessDict:]
+ dict = nil
 dst.Reset()
 }
 if bw.err != nil {
@@ -249,7 +262,15 @@ func statelessEnc(dst *tokens, src []byte, startAt int16) {
 l++
 }
 if nextEmit < s {
- emitLiteral(dst, src[nextEmit:s])
+ if false {
+ emitLiteral(dst, src[nextEmit:s])
+ } else {
+ for _, v := range src[nextEmit:s] {
+ dst.tokens[dst.n] = token(v)
+ dst.litHist[v]++
+ dst.n++
+ }
+ }
 }

 // Save the match found
diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go
index eb862d7a9..d818790c1 100644
--- a/vendor/github.com/klauspost/compress/flate/token.go
+++ b/vendor/github.com/klauspost/compress/flate/token.go
@@ -13,11 +13,10 @@ import (
 )

 const (
- // From top
- // 2 bits: type 0 = literal 1=EOF 2=Match 3=Unused
- // 8 bits: xlength = length - MIN_MATCH_LENGTH
- // 5 bits offsetcode
- // 16 bits xoffset = offset - MIN_OFFSET_SIZE, or literal
+ // bits 0-16 xoffset = offset - MIN_OFFSET_SIZE, or literal - 16 bits
+ // bits 16-22 offsetcode - 5 bits
+ // bits 22-30 xlength = length - MIN_MATCH_LENGTH - 8 bits
+ // bits 30-32 type 0 = literal 1=EOF 2=Match 3=Unused - 2 bits
 lengthShift = 22
 offsetMask = 1<<lengthShift - 1
 typeMask = 3 << 30
@@ ... @@
 type tokens struct {
- nLits int
 extraHist [32]uint16 // codes 256->maxnumlit
 offHist [32]uint16 // offset codes
 litHist [256]uint16 // codes 0->255
- n uint16 // Must be able to contain maxStoreBlockSize
+ nFilled int
+ n uint16 // Must be able to contain maxStoreBlockSize
 tokens [maxStoreBlockSize + 1]token
 }

@@ -142,7 +141,7 @@ func (t *tokens) Reset() {
 return
 }
 t.n = 0
- t.nLits = 0
+ t.nFilled = 0
 for i := range t.litHist[:] {
 t.litHist[i] = 0
 }
@@ -161,12 +160,12 @@ func (t *tokens) Fill() {
 for i, v := range t.litHist[:] {
 if v == 0 {
 t.litHist[i] = 1
- t.nLits++
+ t.nFilled++
 }
 }
 for i, v := range t.extraHist[:literalCount-256] {
 if v == 0 {
- t.nLits++
+ t.nFilled++
 t.extraHist[i] = 1
 }
 }
@@ -196,20 +195,17 @@ func (t *tokens) indexTokens(in []token) {

 // emitLiteral writes a literal chunk and returns the number of bytes written.
 func emitLiteral(dst *tokens, lit []byte) {
- ol := int(dst.n)
- for i, v := range lit {
- dst.tokens[(i+ol)&maxStoreBlockSize] = token(v)
+ for _, v := range lit {
+ dst.tokens[dst.n] = token(v)
 dst.litHist[v]++
+ dst.n++
 }
- dst.n += uint16(len(lit))
- dst.nLits += len(lit)
 }

 func (t *tokens) AddLiteral(lit byte) {
 t.tokens[t.n] = token(lit)
 t.litHist[lit]++
 t.n++
- t.nLits++
 }

 // from https://stackoverflow.com/a/28730362
@@ -230,8 +226,9 @@ func (t *tokens) EstimatedBits() int {
 shannon := float32(0)
 bits := int(0)
 nMatches := 0
- if t.nLits > 0 {
- invTotal := 1.0 / float32(t.nLits)
+ total := int(t.n) + t.nFilled
+ if total > 0 {
+ invTotal := 1.0 / float32(total)
 for _, v := range t.litHist[:] {
 if v > 0 {
 n := float32(v)
@@ -275,10 +272,9 @@ func (t *tokens) AddMatch(xlength uint32, xoffset uint32) {
 }
 oCode := offsetCode(xoffset)
 xoffset |= oCode << 16
- t.nLits++
 t.extraHist[lengthCodes1[uint8(xlength)]]++
- t.offHist[oCode]++
+ t.offHist[oCode&31]++
 t.tokens[t.n] = token(matchType | xlength<<lengthShift | xoffset)
 t.n++
 }
@@ ... @@ func (t *tokens) AddMatchLong(xlength int32, xoffset uint32) {
 xl := xlength
 if xl > 258 {
 // We need to have at least baseMatchLength left over for next loop.
- xl = 258 - baseMatchLength
+ if xl > 258+baseMatchLength {
+ xl = 258
+ } else {
+ xl = 258 - baseMatchLength
+ }
 }
 xlength -= xl
 xl -= baseMatchLength
- t.nLits++
 t.extraHist[lengthCodes1[uint8(xl)]]++
- t.offHist[oc]++
+ t.offHist[oc&31]++
 t.tokens[t.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
 t.n++
 }
 }
@@ ... @@
 func (t token) length() uint8 { return uint8(t >> lengthShift) }

-// The code is never more than 8 bits, but is returned as uint32 for convenience.
-func lengthCode(len uint8) uint32 { return uint32(lengthCodes[len]) }
+// Convert length to code.
+func lengthCode(len uint8) uint8 { return lengthCodes[len] }

 // Returns the offset code corresponding to a specific offset
 func offsetCode(off uint32) uint32 {
diff --git a/vendor/github.com/klauspost/compress/gen.sh b/vendor/github.com/klauspost/compress/gen.sh
new file mode 100644
index 000000000..aff942205
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/gen.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+cd s2/cmd/_s2sx/ || exit 1
+go generate .
diff --git a/vendor/github.com/klauspost/compress/go.mod b/vendor/github.com/klauspost/compress/go.mod
new file mode 100644
index 000000000..cd8e6b29f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/go.mod
@@ -0,0 +1,3 @@
+module github.com/klauspost/compress
+
+go 1.17
diff --git a/vendor/github.com/willf/bitset/go.sum b/vendor/github.com/klauspost/compress/go.sum
similarity index 100%
rename from vendor/github.com/willf/bitset/go.sum
rename to vendor/github.com/klauspost/compress/go.sum
diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go
index a4979e886..504a7be9d 100644
--- a/vendor/github.com/klauspost/compress/huff0/bitreader.go
+++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go
@@ -8,115 +8,10 @@ package huff0
 import (
 "encoding/binary"
 "errors"
+ "fmt"
 "io"
 )

-// bitReader reads a bitstream in reverse.
-// The last set bit indicates the start of the stream and is used
-// for aligning the input.
-type bitReader struct {
- in []byte
- off uint // next byte to read is at in[off - 1]
- value uint64
- bitsRead uint8
-}
-
-// init initializes and resets the bit reader.
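For reference while reading the token.go histogram changes above: a token is a single uint32 in the layout the rewritten comment documents (offset and its 5-bit offset code in bits 0-22, length in bits 22-30, type in bits 30-32). A sketch of packing and unpacking, with illustrative helper names:

const (
	lengthShift = 22
	offsetMask  = 1<<lengthShift - 1 // offset plus its 5-bit offset code
	matchType   = 1 << 30
)

func makeMatch(xlength, xoffset, offCode uint32) uint32 {
	xoffset |= offCode << 16 // mirrors "xoffset |= oc << 16" above
	return matchType | xlength<<lengthShift | xoffset
}

func tokenLength(t uint32) uint8 { return uint8(t >> lengthShift) } // uint8 drops the type bits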
-func (b *bitReader) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.bitsRead += 8 - uint8(highBit32(uint32(v))) - return nil -} - -// peekBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReader) peekBitsFast(n uint8) uint16 { - const regMask = 64 - 1 - v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) - return v -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReader) fillFast() { - if b.bitsRead < 32 { - return - } - - // 2 bounds checks. - v := b.in[b.off-4 : b.off] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 -} - -func (b *bitReader) advance(n uint8) { - b.bitsRead += n -} - -// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. -func (b *bitReader) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// fill() will make sure at least 32 bits are available. -func (b *bitReader) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value = (b.value << 8) | uint64(b.in[b.off-1]) - b.bitsRead -= 8 - b.off-- - } -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReader) finished() bool { - return b.off == 0 && b.bitsRead >= 64 -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReader) close() error { - // Release reference. - b.in = nil - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} - // bitReader reads a bitstream in reverse. // The last set bit indicates the start of the stream and is used // for aligning the input. @@ -213,10 +108,17 @@ func (b *bitReaderBytes) finished() bool { return b.off == 0 && b.bitsRead >= 64 } +func (b *bitReaderBytes) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) +} + // close the bitstream and returns an error if out-of-buffer reads occurred. func (b *bitReaderBytes) close() error { // Release reference. b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } if b.bitsRead > 64 { return io.ErrUnexpectedEOF } @@ -313,15 +215,17 @@ func (b *bitReaderShifted) fill() { } } -// finished returns true if all bits have been read from the bit stream. -func (b *bitReaderShifted) finished() bool { - return b.off == 0 && b.bitsRead >= 64 +func (b *bitReaderShifted) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) } // close the bitstream and returns an error if out-of-buffer reads occurred. func (b *bitReaderShifted) close() error { // Release reference. 
b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } if b.bitsRead > 64 { return io.ErrUnexpectedEOF } diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go index 6bce4e87d..ec71f7a34 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -5,8 +5,6 @@ package huff0 -import "fmt" - // bitWriter will write bits. // First bit will be LSB of the first byte of output. type bitWriter struct { @@ -23,14 +21,6 @@ var bitMask16 = [32]uint16{ 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF} /* up to 16 bits */ -// addBits16NC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16NC(value uint16, bits uint8) { - b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) - b.nBits += bits -} - // addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. // It will not check if there is space for them, so the caller must ensure that it has flushed recently. func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { @@ -70,104 +60,6 @@ func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { b.nBits += encA.nBits + encB.nBits } -// addBits16ZeroNC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -// This is fastest if bits can be zero. -func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { - if bits == 0 { - return - } - value <<= (16 - bits) & 15 - value >>= (16 - bits) & 15 - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// flush will flush all pending full bytes. -// There will be at least 56 bits available for writing when this has been called. -// Using flush32 is faster, but leaves less space for writing. 
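With the byte-at-a-time flush removed below, flush32 is the writer's only spill path: bits accumulate LSB-first in a 64-bit container and are written four bytes at a time once 32 or more are queued. A compact sketch of that scheme (as with addBits16Clean, `value` must already be masked to `bits`):

type bitWriter struct {
	bitContainer uint64
	nBits        uint8
	out          []byte
}

func (b *bitWriter) addBits(value uint16, bits uint8) {
	b.bitContainer |= uint64(value) << (b.nBits & 63)
	b.nBits += bits
	if b.nBits >= 32 { // flush32: spill four whole bytes
		b.out = append(b.out,
			byte(b.bitContainer), byte(b.bitContainer>>8),
			byte(b.bitContainer>>16), byte(b.bitContainer>>24))
		b.bitContainer >>= 32
		b.nBits -= 32
	}
}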
-func (b *bitWriter) flush() { - v := b.nBits >> 3 - switch v { - case 0: - return - case 1: - b.out = append(b.out, - byte(b.bitContainer), - ) - b.bitContainer >>= 1 << 3 - case 2: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - ) - b.bitContainer >>= 2 << 3 - case 3: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - ) - b.bitContainer >>= 3 << 3 - case 4: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - ) - b.bitContainer >>= 4 << 3 - case 5: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - ) - b.bitContainer >>= 5 << 3 - case 6: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - ) - b.bitContainer >>= 6 << 3 - case 7: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - ) - b.bitContainer >>= 7 << 3 - case 8: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - byte(b.bitContainer>>56), - ) - b.bitContainer = 0 - b.nBits = 0 - return - default: - panic(fmt.Errorf("bits (%d) > 64", b.nBits)) - } - b.nBits &= 7 -} - // flush32 will flush out, so there are at least 32 bits available for writing. func (b *bitWriter) flush32() { if b.nBits < 32 { @@ -201,10 +93,3 @@ func (b *bitWriter) close() error { b.flushAlign() return nil } - -// reset and continue writing by appending to out. -func (b *bitWriter) reset(out []byte) { - b.bitContainer = 0 - b.nBits = 0 - b.out = out -} diff --git a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go index 50bcdf6ea..4dcab8d23 100644 --- a/vendor/github.com/klauspost/compress/huff0/bytereader.go +++ b/vendor/github.com/klauspost/compress/huff0/bytereader.go @@ -20,11 +20,6 @@ func (b *byteReader) init(in []byte) { b.off = 0 } -// advance the stream b n bytes. -func (b *byteReader) advance(n uint) { - b.off += int(n) -} - // Int32 returns a little endian int32 starting at current offset. func (b byteReader) Int32() int32 { v3 := int32(b.b[b.off+3]) @@ -43,11 +38,6 @@ func (b byteReader) Uint32() uint32 { return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 } -// unread returns the unread portion of the input. -func (b byteReader) unread() []byte { - return b.b[b.off:] -} - // remain will return the number of bytes remaining. 
func (b byteReader) remain() int { return len(b.b) - b.off diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go index 0823c928c..d9223a91e 100644 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -2,6 +2,7 @@ package huff0 import ( "fmt" + "math" "runtime" "sync" ) @@ -161,6 +162,70 @@ func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error) return s.Out, false, nil } +// EstimateSizes will estimate the data sizes +func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err error) { + s, err = s.prepare(in) + if err != nil { + return 0, 0, 0, err + } + + // Create histogram, if none was provided. + tableSz, dataSz, reuseSz = -1, -1, -1 + maxCount := s.maxCount + var canReuse = false + if maxCount == 0 { + maxCount, canReuse = s.countSimple(in) + } else { + canReuse = s.canUseTable(s.prevTable) + } + + // We want the output size to be less than this: + wantSize := len(in) + if s.WantLogLess > 0 { + wantSize -= wantSize >> s.WantLogLess + } + + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount >= len(in) { + if maxCount > len(in) { + return 0, 0, 0, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) + } + if len(in) == 1 { + return 0, 0, 0, ErrIncompressible + } + // One symbol, use RLE + return 0, 0, 0, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return 0, 0, 0, ErrIncompressible + } + + // Calculate new table. + err = s.buildCTable() + if err != nil { + return 0, 0, 0, err + } + + if false && !s.canUseTable(s.cTable) { + panic("invalid table generated") + } + + tableSz, err = s.cTable.estTableSize(s) + if err != nil { + return 0, 0, 0, err + } + if canReuse { + reuseSz = s.prevTable.estimateSize(s.count[:s.symbolLen]) + } + dataSz = s.cTable.estimateSize(s.count[:s.symbolLen]) + + // Restore + return tableSz, dataSz, reuseSz, nil +} + func (s *Scratch) compress1X(src []byte) ([]byte, error) { return s.compress1xDo(s.Out, src) } @@ -225,6 +290,10 @@ func (s *Scratch) compress4X(src []byte) ([]byte, error) { if err != nil { return nil, err } + if len(s.Out)-idx > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } // Write compressed length as little endian before block. if i < 3 { // Last length is not written. @@ -268,6 +337,10 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { return nil, errs[i] } o := s.tmpOut[i] + if len(o) > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } // Write compressed length as little endian before block. if i < 3 { // Last length is not written. 
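EstimateSizes, added above, prices a block without actually compressing it: the cost of emitting a fresh Huffman table (tableSz), the data cost under that table (dataSz), and the data cost under the previous table (reuseSz, -1 when reuse is impossible). A hedged caller sketch (shouldReuse is hypothetical; block and s are assumed inputs):

func shouldReuse(block []byte, s *huff0.Scratch) (bool, error) {
	tableSz, dataSz, reuseSz, err := huff0.EstimateSizes(block, s)
	if err != nil {
		return false, err // e.g. ErrIncompressible or ErrUseRLE
	}
	return reuseSz >= 0 && reuseSz < tableSz+dataSz, nil
}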
@@ -292,29 +365,29 @@ func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { m := uint32(0) if len(s.prevTable) > 0 { for i, v := range s.count[:] { + if v == 0 { + continue + } if v > m { m = v } - if v > 0 { - s.symbolLen = uint16(i) + 1 - if i >= len(s.prevTable) { - reuse = false - } else { - if s.prevTable[i].nBits == 0 { - reuse = false - } - } + s.symbolLen = uint16(i) + 1 + if i >= len(s.prevTable) { + reuse = false + } else if s.prevTable[i].nBits == 0 { + reuse = false } } return int(m), reuse } for i, v := range s.count[:] { + if v == 0 { + continue + } if v > m { m = v } - if v > 0 { - s.symbolLen = uint16(i) + 1 - } + s.symbolLen = uint16(i) + 1 } return int(m), false } @@ -331,6 +404,7 @@ func (s *Scratch) canUseTable(c cTable) bool { return true } +//lint:ignore U1000 used for debugging func (s *Scratch) validateTable(c cTable) bool { if len(c) < int(s.symbolLen) { return false diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go index 41703bba4..42a237eac 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -4,13 +4,13 @@ import ( "errors" "fmt" "io" + "sync" "github.com/klauspost/compress/fse" ) type dTable struct { single []dEntrySingle - double []dEntryDouble } // single-symbols decoding @@ -18,13 +18,6 @@ type dEntrySingle struct { entry uint16 } -// double-symbols decoding -type dEntryDouble struct { - seq uint16 - nBits uint8 - len uint8 -} - // Uses special code for all tables that are < 8 bits. const use8BitTables = true @@ -34,7 +27,7 @@ const use8BitTables = true // If no Scratch is provided a new one is allocated. // The returned Scratch can be used for encoding or decoding input using this table. func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { - s, err = s.prepare(in) + s, err = s.prepare(nil) if err != nil { return s, nil, err } @@ -216,6 +209,7 @@ func (s *Scratch) Decoder() *Decoder { return &Decoder{ dt: s.dt, actualTableLog: s.actualTableLog, + bufs: &s.decPool, } } @@ -223,103 +217,15 @@ func (s *Scratch) Decoder() *Decoder { type Decoder struct { dt dTable actualTableLog uint8 + bufs *sync.Pool } -// Decompress1X will decompress a 1X encoded stream. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. -func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if use8BitTables && d.actualTableLog <= 8 { - return d.decompress1X8Bit(dst, src) - } - var br bitReaderShifted - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:0] - - // Avoid bounds check by always having full sized table. - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - dt := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. 
- var buf [256]byte - var off uint8 - - for br.off >= 8 { - br.fillFast() - v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - // Refill - br.fillFast() - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - - if len(dst)+int(off) > maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:off]...) - - // br < 8, so uint8 is fine - bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead - for bitsLeft > 0 { - br.fill() - if false && br.bitsRead >= 32 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value = (br.value << 32) | uint64(low) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value = (br.value << 8) | uint64(br.in[br.off-1]) - br.bitsRead -= 8 - br.off-- - } - } - } - if len(dst) >= maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= nBits - dst = append(dst, uint8(v.entry>>8)) +func (d *Decoder) buffer() *[4][256]byte { + buf, ok := d.bufs.Get().(*[4][256]byte) + if ok { + return buf } - return dst, br.close() + return &[4][256]byte{} } // decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. @@ -341,41 +247,258 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { dt := d.dt.single[:256] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + bufs := d.buffer() + buf := &bufs[0] var off uint8 - shift := (8 - d.actualTableLog) & 7 - - //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) - for br.off >= 4 { - br.fillFast() - v := dt[br.peekByteFast()>>shift] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[br.peekByteFast()>>shift] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[br.peekByteFast()>>shift] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[br.peekByteFast()>>shift] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded + switch d.actualTableLog { + case 8: + const shift = 8 - 8 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + case 7: + const shift = 8 - 7 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 6: + const shift = 8 - 6 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 5: + const shift = 8 - 5 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 4: + const shift = 8 - 4 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 3: + const shift = 8 - 3 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) } - dst = append(dst, buf[:]...) 
} + case 2: + const shift = 8 - 2 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 1: + const shift = 8 - 1 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + default: + d.bufs.Put(bufs) + return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog) } if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -383,6 +506,8 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { // br < 4, so uint8 is fine bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + shift := (8 - d.actualTableLog) & 7 + for bitsLeft > 0 { if br.bitsRead >= 64-8 { for br.off > 0 { @@ -393,6 +518,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { } if len(dst) >= maxDecodedSize { br.close() + d.bufs.Put(bufs) return nil, ErrMaxDecodedSizeExceeded } v := dt[br.peekByteFast()>>shift] @@ -401,6 +527,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { bitsLeft -= int8(nBits) dst = append(dst, uint8(v.entry>>8)) } + d.bufs.Put(bufs) return dst, br.close() } @@ -420,33 +547,35 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { dt := d.dt.single[:256] // Use temp table to avoid bound checks/append penalty. 
- var buf [256]byte + bufs := d.buffer() + buf := &bufs[0] var off uint8 - const shift = 0 + const shift = 56 //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) for br.off >= 4 { br.fillFast() - v := dt[br.peekByteFast()>>shift] + v := dt[uint8(br.value>>shift)] br.advance(uint8(v.entry)) buf[off+0] = uint8(v.entry >> 8) - v = dt[br.peekByteFast()>>shift] + v = dt[uint8(br.value>>shift)] br.advance(uint8(v.entry)) buf[off+1] = uint8(v.entry >> 8) - v = dt[br.peekByteFast()>>shift] + v = dt[uint8(br.value>>shift)] br.advance(uint8(v.entry)) buf[off+2] = uint8(v.entry >> 8) - v = dt[br.peekByteFast()>>shift] + v = dt[uint8(br.value>>shift)] br.advance(uint8(v.entry)) buf[off+3] = uint8(v.entry >> 8) off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -455,6 +584,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { } if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -471,208 +601,20 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { } } if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } - v := dt[br.peekByteFast()>>shift] + v := dt[br.peekByteFast()] nBits := uint8(v.entry) br.advance(nBits) bitsLeft -= int8(nBits) dst = append(dst, uint8(v.entry>>8)) } + d.bufs.Put(bufs) return dst, br.close() } -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. -func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if len(src) < 6+(4*1) { - return nil, errors.New("input too small") - } - if use8BitTables && d.actualTableLog <= 8 { - return d.decompress4X8bit(dst, src) - } - - var br [4]bitReaderShifted - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - single := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - var buf [256]byte - var off uint8 - var decoded int - - // Decode 2 values from each decoder/loop. 
- const bufoff = 256 / 4 - for { - if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { - break - } - - { - const stream = 0 - const stream2 = 1 - br[stream].fillFast() - br[stream2].fillFast() - - val := br[stream].peekBitsFast(d.actualTableLog) - v := single[val&tlMask] - br[stream].advance(uint8(v.entry)) - buf[off+bufoff*stream] = uint8(v.entry >> 8) - - val2 := br[stream2].peekBitsFast(d.actualTableLog) - v2 := single[val2&tlMask] - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream2] = uint8(v2.entry >> 8) - - val = br[stream].peekBitsFast(d.actualTableLog) - v = single[val&tlMask] - br[stream].advance(uint8(v.entry)) - buf[off+bufoff*stream+1] = uint8(v.entry >> 8) - - val2 = br[stream2].peekBitsFast(d.actualTableLog) - v2 = single[val2&tlMask] - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8) - } - - { - const stream = 2 - const stream2 = 3 - br[stream].fillFast() - br[stream2].fillFast() - - val := br[stream].peekBitsFast(d.actualTableLog) - v := single[val&tlMask] - br[stream].advance(uint8(v.entry)) - buf[off+bufoff*stream] = uint8(v.entry >> 8) - - val2 := br[stream2].peekBitsFast(d.actualTableLog) - v2 := single[val2&tlMask] - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream2] = uint8(v2.entry >> 8) - - val = br[stream].peekBitsFast(d.actualTableLog) - v = single[val&tlMask] - br[stream].advance(uint8(v.entry)) - buf[off+bufoff*stream+1] = uint8(v.entry >> 8) - - val2 = br[stream2].peekBitsFast(d.actualTableLog) - v2 = single[val2&tlMask] - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8) - } - - off += 2 - - if off == bufoff { - if bufoff > dstEvery { - return nil, errors.New("corruption detected: stream overrun 1") - } - copy(out, buf[:bufoff]) - copy(out[dstEvery:], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) - off = 0 - out = out[bufoff:] - decoded += 256 - // There must at least be 3 buffers left. - if len(out) < dstEvery*3 { - return nil, errors.New("corruption detected: stream overrun 2") - } - } - } - if off > 0 { - ioff := int(off) - if len(out) < dstEvery*3+ioff { - return nil, errors.New("corruption detected: stream overrun 3") - } - copy(out, buf[:off]) - copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) - decoded += int(off) * 4 - out = out[off:] - } - - // Decode remaining. - for i := range br { - offset := dstEvery * i - br := &br[i] - bitsLeft := br.off*8 + uint(64-br.bitsRead) - for bitsLeft > 0 { - br.fill() - if false && br.bitsRead >= 32 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value = (br.value << 32) | uint64(low) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value = (br.value << 8) | uint64(br.in[br.off-1]) - br.bitsRead -= 8 - br.off-- - } - } - } - // end inline... - if offset >= len(out) { - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. 
- val := br.peekBitsFast(d.actualTableLog) - v := single[val&tlMask].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= uint(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - return nil, err - } - } - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - // Decompress4X will decompress a 4X encoded stream. // The length of the supplied input must match the end of a block exactly. // The *capacity* of the dst slice must match the destination size of @@ -706,19 +648,18 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { out := dst dstEvery := (dstSize + 3) / 4 - shift := (8 - d.actualTableLog) & 7 + shift := (56 + (8 - d.actualTableLog)) & 63 const tlSize = 1 << 8 - const tlMask = tlSize - 1 single := d.dt.single[:tlSize] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + buf := d.buffer() var off uint8 var decoded int // Decode 4 values from each decoder/loop. - const bufoff = 256 / 4 + const bufoff = 256 for { if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { break @@ -728,120 +669,144 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { // Interleave 2 decodes. const stream = 0 const stream2 = 1 - br[stream].fillFast() - br[stream2].fillFast() - - v := single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 := single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+1] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+2] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+3] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = 
uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) } { const stream = 2 const stream2 = 3 - br[stream].fillFast() - br[stream2].fillFast() - - v := single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 := single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+1] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+2] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+3] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) } off += 4 - if off == bufoff { + if off == 0 { if bufoff > dstEvery { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 1") } - copy(out, buf[:bufoff]) - copy(out[dstEvery:], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) - off = 0 - out = out[bufoff:] - decoded += 256 // There must at least be 3 buffers left. 
- if len(out) < dstEvery*3 { + if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 2") } + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 } } if off > 0 { ioff := int(off) if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 3") } - copy(out, buf[:off]) - copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) decoded += int(off) * 4 out = out[off:] } // Decode remaining. + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) for i := range br { offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } br := &br[i] - bitsLeft := int(br.off*8) + int(64-br.bitsRead) + bitsLeft := br.remaining() for bitsLeft > 0 { if br.finished() { + d.bufs.Put(buf) return nil, io.ErrUnexpectedEOF } if br.bitsRead >= 56 { @@ -861,24 +826,31 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { } } // end inline... - if offset >= len(out) { + if offset >= endsAt { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 4") } // Read value and increment offset. - v := single[br.peekByteFast()>>shift].entry + v := single[uint8(br.value>>shift)].entry nBits := uint8(v) br.advance(nBits) - bitsLeft -= int(nBits) + bitsLeft -= uint(nBits) out[offset] = uint8(v >> 8) offset++ } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } decoded += offset - dstEvery*i err = br.close() if err != nil { + d.bufs.Put(buf) return nil, err } } + d.bufs.Put(buf) if dstSize != decoded { return nil, errors.New("corruption detected: short output block") } @@ -914,18 +886,17 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { out := dst dstEvery := (dstSize + 3) / 4 - const shift = 0 + const shift = 56 const tlSize = 1 << 8 - const tlMask = tlSize - 1 single := d.dt.single[:tlSize] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + buf := d.buffer() var off uint8 var decoded int // Decode 4 values from each decoder/loop. - const bufoff = 256 / 4 + const bufoff = 256 for { if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { break @@ -935,98 +906,116 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { // Interleave 2 decodes. 
const stream = 0 const stream2 = 1 - br[stream].fillFast() - br[stream2].fillFast() - - v := single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 := single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+1] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+2] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+3] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) } { const stream = 2 const stream2 = 3 - br[stream].fillFast() - br[stream2].fillFast() - - v := single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 := single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+1] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+2] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+3] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry 
+ v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) } off += 4 - if off == bufoff { + if off == 0 { if bufoff > dstEvery { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 1") } - copy(out, buf[:bufoff]) - copy(out[dstEvery:], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) - off = 0 - out = out[bufoff:] - decoded += 256 // There must at least be 3 buffers left. - if len(out) < dstEvery*3 { + if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 2") } + + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + // copy(out[dstEvery*3:], buf[3][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 } } if off > 0 { @@ -1034,21 +1023,27 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { if len(out) < dstEvery*3+ioff { return nil, errors.New("corruption detected: stream overrun 3") } - copy(out, buf[:off]) - copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) decoded += int(off) * 4 out = out[off:] } // Decode remaining. + remainBytes := dstEvery - (decoded / 4) for i := range br { offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } br := &br[i] - bitsLeft := int(br.off*8) + int(64-br.bitsRead) + bitsLeft := br.remaining() for bitsLeft > 0 { if br.finished() { + d.bufs.Put(buf) return nil, io.ErrUnexpectedEOF } if br.bitsRead >= 56 { @@ -1068,24 +1063,32 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { } } // end inline... - if offset >= len(out) { + if offset >= endsAt { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 4") } // Read value and increment offset. 
- v := single[br.peekByteFast()>>shift].entry + v := single[br.peekByteFast()].entry nBits := uint8(v) br.advance(nBits) - bitsLeft -= int(nBits) + bitsLeft -= uint(nBits) out[offset] = uint8(v >> 8) offset++ } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i err = br.close() if err != nil { + d.bufs.Put(buf) return nil, err } } + d.bufs.Put(buf) if dstSize != decoded { return nil, errors.New("corruption detected: short output block") } diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go new file mode 100644 index 000000000..ba7e8e6b0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go @@ -0,0 +1,226 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +// This file contains the specialisation of Decoder.Decompress4X +// and Decoder.Decompress1X that use an asm implementation of thir main loops. +package huff0 + +import ( + "errors" + "fmt" + + "github.com/klauspost/compress/internal/cpuinfo" +) + +// decompress4x_main_loop_x86 is an x86 assembler implementation +// of Decompress4X when tablelog > 8. +// +//go:noescape +func decompress4x_main_loop_amd64(ctx *decompress4xContext) + +// decompress4x_8b_loop_x86 is an x86 assembler implementation +// of Decompress4X when tablelog <= 8 which decodes 4 entries +// per loop. +// +//go:noescape +func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) + +// fallback8BitSize is the size where using Go version is faster. +const fallback8BitSize = 800 + +type decompress4xContext struct { + pbr *[4]bitReaderShifted + peekBits uint8 + out *byte + dstEvery int + tbl *dEntrySingle + decoded int + limit *byte +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + + use8BitTables := d.actualTableLog <= 8 + if cap(dst) < fallback8BitSize && use8BitTables { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + var decoded int + + if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) { + ctx := decompress4xContext{ + pbr: &br, + peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() + out: &out[0], + dstEvery: dstEvery, + tbl: &single[0], + limit: &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last. 
+ } + if use8BitTables { + decompress4x_8b_main_loop_amd64(&ctx) + } else { + decompress4x_main_loop_amd64(&ctx) + } + + decoded = ctx.decoded + out = out[decoded/4:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// decompress4x_main_loop_x86 is an x86 assembler implementation +// of Decompress1X when tablelog > 8. +// +//go:noescape +func decompress1x_main_loop_amd64(ctx *decompress1xContext) + +// decompress4x_main_loop_x86 is an x86 with BMI2 assembler implementation +// of Decompress1X when tablelog > 8. +// +//go:noescape +func decompress1x_main_loop_bmi2(ctx *decompress1xContext) + +type decompress1xContext struct { + pbr *bitReaderShifted + peekBits uint8 + out *byte + outCap int + tbl *dEntrySingle + decoded int +} + +// Error reported by asm implementations +const error_max_decoded_size_exeeded = -1 + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:maxDecodedSize] + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + + if maxDecodedSize >= 4 { + ctx := decompress1xContext{ + pbr: &br, + out: &dst[0], + outCap: maxDecodedSize, + peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() + tbl: &d.dt.single[0], + } + + if cpuinfo.HasBMI2() { + decompress1x_main_loop_bmi2(&ctx) + } else { + decompress1x_main_loop_amd64(&ctx) + } + if ctx.decoded == error_max_decoded_size_exeeded { + return nil, ErrMaxDecodedSizeExceeded + } + + dst = dst[:ctx.decoded] + } + + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if len(dst) >= maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + return dst, br.close() +} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s new file mode 100644 index 000000000..8d2187a2c --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s @@ -0,0 +1,846 @@ +// Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT. 
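// The routines in this file are the assembler counterparts of the
// decompress4xContext / decompress1xContext loops declared in
// decompress_amd64.go. In scalar form, each decode step is roughly:
//
//	val := br.value >> peekBits   // peekBits = (64 - actualTableLog) & 63
//	v := table[val]               // dEntrySingle; low byte = bit count
//	br.value <<= v & 63           // consume the bits just used
//	br.bitsRead += uint8(v)
//	out[off] = uint8(v >> 8)      // high byte = decoded symbol
//
// unrolled two (4X) or four (8-bit 4X) times per stream, with the byte
// stores for each group coalesced into a single MOVW/MOVL.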
+ +//go:build amd64 && !appengine && !noasm && gc + +// func decompress4x_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_main_loop_amd64(SB), $0-8 + XORQ DX, DX + + // Preload values + MOVQ ctx+0(FP), AX + MOVBQZX 8(AX), DI + MOVQ 16(AX), SI + MOVQ 48(AX), BX + MOVQ 24(AX), R9 + MOVQ 32(AX), R10 + MOVQ (AX), R11 + + // Main loop +main_loop: + MOVQ SI, R8 + CMPQ R8, BX + SETGE DL + + // br0.fillFast32() + MOVQ 32(R11), R12 + MOVBQZX 40(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill0 + MOVQ 24(R11), AX + SUBQ $0x20, R13 + SUBQ $0x04, AX + MOVQ (R11), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R14*1), R14 + MOVQ R13, CX + SHLQ CL, R14 + MOVQ AX, 24(R11) + ORQ R14, R12 + + // exhausted = exhausted || (br0.off < 4) + CMPQ AX, $0x04 + SETLT AL + ORB AL, DL + +skip_fill0: + // val0 := br0.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br0.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R12, R14 + SHRQ CL, R14 + + // v1 := table[val1&mask] + MOVW (R10)(R14*2), CX + + // br0.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (R8) + + // update the bitreader structure + MOVQ R12, 32(R11) + MOVB R13, 40(R11) + ADDQ R9, R8 + + // br1.fillFast32() + MOVQ 80(R11), R12 + MOVBQZX 88(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill1 + MOVQ 72(R11), AX + SUBQ $0x20, R13 + SUBQ $0x04, AX + MOVQ 48(R11), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R14*1), R14 + MOVQ R13, CX + SHLQ CL, R14 + MOVQ AX, 72(R11) + ORQ R14, R12 + + // exhausted = exhausted || (br1.off < 4) + CMPQ AX, $0x04 + SETLT AL + ORB AL, DL + +skip_fill1: + // val0 := br1.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br1.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R12, R14 + SHRQ CL, R14 + + // v1 := table[val1&mask] + MOVW (R10)(R14*2), CX + + // br1.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (R8) + + // update the bitreader structure + MOVQ R12, 80(R11) + MOVB R13, 88(R11) + ADDQ R9, R8 + + // br2.fillFast32() + MOVQ 128(R11), R12 + MOVBQZX 136(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill2 + MOVQ 120(R11), AX + SUBQ $0x20, R13 + SUBQ $0x04, AX + MOVQ 96(R11), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R14*1), R14 + MOVQ R13, CX + SHLQ CL, R14 + MOVQ AX, 120(R11) + ORQ R14, R12 + + // exhausted = exhausted || (br2.off < 4) + CMPQ AX, $0x04 + SETLT AL + ORB AL, DL + +skip_fill2: + // val0 := br2.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br2.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R12, R14 + SHRQ CL, R14 + + // v1 := table[val1&mask] + MOVW (R10)(R14*2), CX + + // br2.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // 
out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (R8) + + // update the bitreader structure + MOVQ R12, 128(R11) + MOVB R13, 136(R11) + ADDQ R9, R8 + + // br3.fillFast32() + MOVQ 176(R11), R12 + MOVBQZX 184(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill3 + MOVQ 168(R11), AX + SUBQ $0x20, R13 + SUBQ $0x04, AX + MOVQ 144(R11), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R14*1), R14 + MOVQ R13, CX + SHLQ CL, R14 + MOVQ AX, 168(R11) + ORQ R14, R12 + + // exhausted = exhausted || (br3.off < 4) + CMPQ AX, $0x04 + SETLT AL + ORB AL, DL + +skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br3.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R12, R14 + SHRQ CL, R14 + + // v1 := table[val1&mask] + MOVW (R10)(R14*2), CX + + // br3.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (R8) + + // update the bitreader structure + MOVQ R12, 176(R11) + MOVB R13, 184(R11) + ADDQ $0x02, SI + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + SUBQ 16(AX), SI + SHLQ $0x02, SI + MOVQ SI, 40(AX) + RET + +// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8 + XORQ DX, DX + + // Preload values + MOVQ ctx+0(FP), CX + MOVBQZX 8(CX), DI + MOVQ 16(CX), BX + MOVQ 48(CX), SI + MOVQ 24(CX), R9 + MOVQ 32(CX), R10 + MOVQ (CX), R11 + + // Main loop +main_loop: + MOVQ BX, R8 + CMPQ R8, SI + SETGE DL + + // br0.fillFast32() + MOVQ 32(R11), R12 + MOVBQZX 40(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill0 + MOVQ 24(R11), R14 + SUBQ $0x20, R13 + SUBQ $0x04, R14 + MOVQ (R11), R15 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R14)(R15*1), R15 + MOVQ R13, CX + SHLQ CL, R15 + MOVQ R14, 24(R11) + ORQ R15, R12 + + // exhausted = exhausted || (br0.off < 4) + CMPQ R14, $0x04 + SETLT AL + ORB AL, DL + +skip_fill0: + // val0 := br0.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br0.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v1 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br0.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // val2 := br0.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v2 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br0.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // val3 := br0.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v3 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br0.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (R8) + + // update the bitreader structure + MOVQ R12, 32(R11) + MOVB R13, 40(R11) + ADDQ R9, R8 + + // br1.fillFast32() + MOVQ 80(R11), R12 + MOVBQZX 88(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill1 + MOVQ 72(R11), R14 + SUBQ $0x20, R13 + SUBQ $0x04, 
R14 + MOVQ 48(R11), R15 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R14)(R15*1), R15 + MOVQ R13, CX + SHLQ CL, R15 + MOVQ R14, 72(R11) + ORQ R15, R12 + + // exhausted = exhausted || (br1.off < 4) + CMPQ R14, $0x04 + SETLT AL + ORB AL, DL + +skip_fill1: + // val0 := br1.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br1.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v1 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br1.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // val2 := br1.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v2 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br1.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // val3 := br1.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v3 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br1.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (R8) + + // update the bitreader structure + MOVQ R12, 80(R11) + MOVB R13, 88(R11) + ADDQ R9, R8 + + // br2.fillFast32() + MOVQ 128(R11), R12 + MOVBQZX 136(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill2 + MOVQ 120(R11), R14 + SUBQ $0x20, R13 + SUBQ $0x04, R14 + MOVQ 96(R11), R15 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R14)(R15*1), R15 + MOVQ R13, CX + SHLQ CL, R15 + MOVQ R14, 120(R11) + ORQ R15, R12 + + // exhausted = exhausted || (br2.off < 4) + CMPQ R14, $0x04 + SETLT AL + ORB AL, DL + +skip_fill2: + // val0 := br2.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br2.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v1 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br2.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // val2 := br2.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v2 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br2.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // val3 := br2.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v3 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br2.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (R8) + + // update the bitreader structure + MOVQ R12, 128(R11) + MOVB R13, 136(R11) + ADDQ R9, R8 + + // br3.fillFast32() + MOVQ 176(R11), R12 + MOVBQZX 184(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill3 + MOVQ 168(R11), R14 + SUBQ $0x20, R13 + SUBQ $0x04, R14 + MOVQ 144(R11), R15 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R14)(R15*1), R15 + MOVQ R13, CX + SHLQ CL, R15 + MOVQ R14, 168(R11) + ORQ R15, R12 + + // exhausted = exhausted || 
(br3.off < 4) + CMPQ R14, $0x04 + SETLT AL + ORB AL, DL + +skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br3.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v1 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br3.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // val2 := br3.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v2 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br3.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // val3 := br3.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v3 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br3.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (R8) + + // update the bitreader structure + MOVQ R12, 176(R11) + MOVB R13, 184(R11) + ADDQ $0x04, BX + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + SUBQ 16(AX), BX + SHLQ $0x02, BX + MOVQ BX, 40(AX) + RET + +// func decompress1x_main_loop_amd64(ctx *decompress1xContext) +TEXT ·decompress1x_main_loop_amd64(SB), $0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exeeded + LEAQ (DX)(BX*1), BX + MOVQ (CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition + +main_loop: + // Check if we have room for 4 bytes in the output buffer + LEAQ 4(DX), CX + CMPQ CX, BX + JGE error_max_decoded_size_exeeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + +bitReader_fillFast_1_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + +bitReader_fillFast_2_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + SUBQ 16(AX), DX + MOVQ DX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET + + // Report error +error_max_decoded_size_exeeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) + RET + +// func decompress1x_main_loop_bmi2(ctx *decompress1xContext) +// Requires: BMI2 +TEXT ·decompress1x_main_loop_bmi2(SB), $0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exeeded + LEAQ (DX)(BX*1), BX + MOVQ 
(CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition + +main_loop: + // Check if we have room for 4 bytes in the output buffer + LEAQ 4(DX), CX + CMPQ CX, BX + JGE error_max_decoded_size_exeeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_1_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_2_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + SUBQ 16(AX), DX + MOVQ DX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET + + // Report error +error_max_decoded_size_exeeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) + RET diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go new file mode 100644 index 000000000..908c17de6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go @@ -0,0 +1,299 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// This file contains a generic implementation of Decoder.Decompress4X. +package huff0 + +import ( + "errors" + "fmt" +) + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 2 values from each decoder/loop. 
+ const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + off += 2 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + // There must at least be 3 buffers left. + if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + //copy(out[dstEvery*3:], buf[3][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. 
+ val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress1X8Bit(dst, src) + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + dt := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + for br.off >= 8 { + br.fillFast() + v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + // Refill + br.fillFast() + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) 
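// The fast loop above runs only while br.off >= 8, i.e. while at least 8
// bytes remain unread, so fillFast can top up the bit buffer without bounds
// checks. The remainder below is decoded one symbol at a time, with the
// decoded size re-checked against maxDecodedSize on every iteration.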
+ + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if false && br.bitsRead >= 32 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value = (br.value << 32) | uint64(low) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value = (br.value << 8) | uint64(br.in[br.off-1]) + br.bitsRead -= 8 + br.off-- + } + } + } + if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go index 7ec2022b6..e8ad17ad0 100644 --- a/vendor/github.com/klauspost/compress/huff0/huff0.go +++ b/vendor/github.com/klauspost/compress/huff0/huff0.go @@ -8,6 +8,7 @@ import ( "fmt" "math" "math/bits" + "sync" "github.com/klauspost/compress/fse" ) @@ -116,6 +117,7 @@ type Scratch struct { nodes []nodeElt tmpOut [4][]byte fse *fse.Scratch + decPool sync.Pool // *[4][256]byte buffers. huffWeight [maxSymbolValue + 1]byte } @@ -245,6 +247,68 @@ func (c cTable) write(s *Scratch) error { return nil } +func (c cTable) estTableSize(s *Scratch) (sz int, err error) { + var ( + // precomputed conversion table + bitsToWeight [tableLogMax + 1]byte + huffLog = s.actualTableLog + // last weight is not saved. + maxSymbolValue = uint8(s.symbolLen - 1) + huffWeight = s.huffWeight[:256] + ) + const ( + maxFSETableLog = 6 + ) + // convert to weight + bitsToWeight[0] = 0 + for n := uint8(1); n < huffLog+1; n++ { + bitsToWeight[n] = huffLog + 1 - n + } + + // Acquire histogram for FSE. + hist := s.fse.Histogram() + hist = hist[:256] + for i := range hist[:16] { + hist[i] = 0 + } + for n := uint8(0); n < maxSymbolValue; n++ { + v := bitsToWeight[c[n].nBits] & 15 + huffWeight[n] = v + hist[v]++ + } + + // FSE compress if feasible. + if maxSymbolValue >= 2 { + huffMaxCnt := uint32(0) + huffMax := uint8(0) + for i, v := range hist[:16] { + if v == 0 { + continue + } + huffMax = byte(i) + if v > huffMaxCnt { + huffMaxCnt = v + } + } + s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) + s.fse.TableLog = maxFSETableLog + b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) + if err == nil && len(b) < int(s.symbolLen>>1) { + sz += 1 + len(b) + return sz, nil + } + // Unable to compress (RLE/uncompressible) + } + // write raw values as 4-bits (max : 15) + if maxSymbolValue > (256 - 128) { + // should not happen : likely means source cannot be compressed + return 0, ErrIncompressible + } + // special case, pack weights 4 bits/weight. + sz += 1 + int(maxSymbolValue/2) + return sz, nil +} + // estimateSize returns the estimated size in bytes of the input represented in the // histogram supplied. func (c cTable) estimateSize(hist []uint32) int { diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go new file mode 100644 index 000000000..3954c5121 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go @@ -0,0 +1,34 @@ +// Package cpuinfo gives runtime info about the current CPU. +// +// This is a very limited module meant for use internally +// in this project. 
For a more versatile solution, check
+// https://github.com/klauspost/cpuid.
+package cpuinfo
+
+// HasBMI1 checks whether an x86 CPU supports the BMI1 extension.
+func HasBMI1() bool {
+	return hasBMI1
+}
+
+// HasBMI2 checks whether an x86 CPU supports the BMI2 extension.
+func HasBMI2() bool {
+	return hasBMI2
+}
+
+// DisableBMI2 will disable BMI2, for testing purposes.
+// Call the returned function to restore the previous state.
+func DisableBMI2() func() {
+	old := hasBMI2
+	hasBMI2 = false
+	return func() {
+		hasBMI2 = old
+	}
+}
+
+// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions.
+func HasBMI() bool {
+	return HasBMI1() && HasBMI2()
+}
+
+var hasBMI1 bool
+var hasBMI2 bool
diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go
new file mode 100644
index 000000000..e802579c4
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go
@@ -0,0 +1,11 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+package cpuinfo
+
+//go:noescape
+func x86extensions() (bmi1, bmi2 bool)
+
+func init() {
+	hasBMI1, hasBMI2 = x86extensions()
+}
diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s
new file mode 100644
index 000000000..4465fbe9e
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s
@@ -0,0 +1,36 @@
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+#include "funcdata.h"
+#include "go_asm.h"
+
+TEXT ·x86extensions(SB), NOSPLIT, $0
+	// 1. determine max EAX value
+	XORQ AX, AX
+	CPUID
+
+	CMPQ AX, $7
+	JB   unsupported
+
+	// 2. EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction"
+	MOVQ $7, AX
+	MOVQ $0, CX
+	CPUID
+
+	BTQ   $3, BX // bit 3 = BMI1
+	SETCS AL
+
+	BTQ   $8, BX // bit 8 = BMI2
+	SETCS AH
+
+	MOVB AL, bmi1+0(FP)
+	MOVB AH, bmi2+1(FP)
+	RET
+
+unsupported:
+	XORQ AX, AX
+	MOVB AL, bmi1+0(FP)
+	MOVB AL, bmi2+1(FP)
+	RET
diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE
similarity index 100%
rename from vendor/github.com/golang/snappy/LICENSE
rename to vendor/github.com/klauspost/compress/internal/snapref/LICENSE
diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/klauspost/compress/internal/snapref/decode.go
similarity index 88%
rename from vendor/github.com/golang/snappy/decode.go
rename to vendor/github.com/klauspost/compress/internal/snapref/decode.go
index f1e04b172..40796a49d 100644
--- a/vendor/github.com/golang/snappy/decode.go
+++ b/vendor/github.com/klauspost/compress/internal/snapref/decode.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package snappy
+package snapref
 
 import (
 	"encoding/binary"
@@ -118,32 +118,23 @@ func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
 	return true
 }
 
-// Read satisfies the io.Reader interface.
-func (r *Reader) Read(p []byte) (int, error) { - if r.err != nil { - return 0, r.err - } - for { - if r.i < r.j { - n := copy(p, r.decoded[r.i:r.j]) - r.i += n - return n, nil - } +func (r *Reader) fill() error { + for r.i >= r.j { if !r.readFull(r.buf[:4], true) { - return 0, r.err + return r.err } chunkType := r.buf[0] if !r.readHeader { if chunkType != chunkTypeStreamIdentifier { r.err = ErrCorrupt - return 0, r.err + return r.err } r.readHeader = true } chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 if chunkLen > len(r.buf) { r.err = ErrUnsupported - return 0, r.err + return r.err } // The chunk types are specified at @@ -153,11 +144,11 @@ func (r *Reader) Read(p []byte) (int, error) { // Section 4.2. Compressed data (chunk type 0x00). if chunkLen < checksumSize { r.err = ErrCorrupt - return 0, r.err + return r.err } buf := r.buf[:chunkLen] if !r.readFull(buf, false) { - return 0, r.err + return r.err } checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 buf = buf[checksumSize:] @@ -165,19 +156,19 @@ func (r *Reader) Read(p []byte) (int, error) { n, err := DecodedLen(buf) if err != nil { r.err = err - return 0, r.err + return r.err } if n > len(r.decoded) { r.err = ErrCorrupt - return 0, r.err + return r.err } if _, err := Decode(r.decoded, buf); err != nil { r.err = err - return 0, r.err + return r.err } if crc(r.decoded[:n]) != checksum { r.err = ErrCorrupt - return 0, r.err + return r.err } r.i, r.j = 0, n continue @@ -186,25 +177,25 @@ func (r *Reader) Read(p []byte) (int, error) { // Section 4.3. Uncompressed data (chunk type 0x01). if chunkLen < checksumSize { r.err = ErrCorrupt - return 0, r.err + return r.err } buf := r.buf[:checksumSize] if !r.readFull(buf, false) { - return 0, r.err + return r.err } checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 // Read directly into r.decoded instead of via r.buf. n := chunkLen - checksumSize if n > len(r.decoded) { r.err = ErrCorrupt - return 0, r.err + return r.err } if !r.readFull(r.decoded[:n], false) { - return 0, r.err + return r.err } if crc(r.decoded[:n]) != checksum { r.err = ErrCorrupt - return 0, r.err + return r.err } r.i, r.j = 0, n continue @@ -213,15 +204,15 @@ func (r *Reader) Read(p []byte) (int, error) { // Section 4.1. Stream identifier (chunk type 0xff). if chunkLen != len(magicBody) { r.err = ErrCorrupt - return 0, r.err + return r.err } if !r.readFull(r.buf[:len(magicBody)], false) { - return 0, r.err + return r.err } for i := 0; i < len(magicBody); i++ { if r.buf[i] != magicBody[i] { r.err = ErrCorrupt - return 0, r.err + return r.err } } continue @@ -230,12 +221,44 @@ func (r *Reader) Read(p []byte) (int, error) { if chunkType <= 0x7f { // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). r.err = ErrUnsupported - return 0, r.err + return r.err } // Section 4.4 Padding (chunk type 0xfe). // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). if !r.readFull(r.buf[:chunkLen], false) { - return 0, r.err + return r.err } } + + return nil +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil +} + +// ReadByte satisfies the io.ByteReader interface. 
+func (r *Reader) ReadByte() (byte, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + c := r.decoded[r.i] + r.i++ + return c, nil } diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go similarity index 98% rename from vendor/github.com/golang/snappy/decode_other.go rename to vendor/github.com/klauspost/compress/internal/snapref/decode_other.go index 2f672be55..77395a6b8 100644 --- a/vendor/github.com/golang/snappy/decode_other.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go @@ -2,9 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !amd64,!arm64 appengine !gc noasm - -package snappy +package snapref // decode writes the decoding of src to dst. It assumes that the varint-encoded // length of the decompressed bytes has already been read, and that len(dst) diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/klauspost/compress/internal/snapref/encode.go similarity index 99% rename from vendor/github.com/golang/snappy/encode.go rename to vendor/github.com/klauspost/compress/internal/snapref/encode.go index 7f2365707..13c6040a5 100644 --- a/vendor/github.com/golang/snappy/encode.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package snappy +package snapref import ( "encoding/binary" diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go similarity index 98% rename from vendor/github.com/golang/snappy/encode_other.go rename to vendor/github.com/klauspost/compress/internal/snapref/encode_other.go index 296d7f0be..298c4f8e9 100644 --- a/vendor/github.com/golang/snappy/encode_other.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go @@ -2,9 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !amd64,!arm64 appengine !gc noasm - -package snappy +package snapref func load32(b []byte, i int) uint32 { b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. @@ -20,6 +18,7 @@ func load64(b []byte, i int) uint64 { // emitLiteral writes a literal chunk and returns the number of bytes written. // // It assumes that: +// // dst is long enough to hold the encoded bytes // 1 <= len(lit) && len(lit) <= 65536 func emitLiteral(dst, lit []byte) int { @@ -44,6 +43,7 @@ func emitLiteral(dst, lit []byte) int { // emitCopy writes a copy chunk and returns the number of bytes written. // // It assumes that: +// // dst is long enough to hold the encoded bytes // 1 <= offset && offset <= 65535 // 4 <= length && length <= 65535 @@ -91,6 +91,7 @@ func emitCopy(dst []byte, offset, length int) int { // src[i:i+k-j] and src[j:k] have the same contents. // // It assumes that: +// // 0 <= i && i < j && j <= len(src) func extendMatch(src []byte, i, j int) int { for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { @@ -107,8 +108,9 @@ func hash(u, shift uint32) uint32 { // been written. 
// // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlock(dst, src []byte) (d int) { // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. // The table element type is uint16, as s < sLimit and sLimit < len(src) diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go similarity index 96% rename from vendor/github.com/golang/snappy/snappy.go rename to vendor/github.com/klauspost/compress/internal/snapref/snappy.go index ece692ea4..34d01f4aa 100644 --- a/vendor/github.com/golang/snappy/snappy.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package snappy implements the Snappy compression format. It aims for very +// Package snapref implements the Snappy compression format. It aims for very // high speeds and reasonable compression. // // There are actually two Snappy formats: block and stream. They are related, @@ -17,7 +17,7 @@ // // The canonical, C++ implementation is at https://github.com/google/snappy and // it only implements the block format. -package snappy // import "github.com/golang/snappy" +package snapref import ( "hash/crc32" diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod new file mode 100644 index 000000000..2263853fc --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2sx.mod @@ -0,0 +1,4 @@ +module github.com/klauspost/compress + +go 1.16 + diff --git a/vendor/github.com/klauspost/compress/s2sx.sum b/vendor/github.com/klauspost/compress/s2sx.sum new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md index 787813fa9..65b38abed 100644 --- a/vendor/github.com/klauspost/compress/zstd/README.md +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -12,6 +12,8 @@ The `zstd` package is provided as open source software using a Go standard licen Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors. +For seekable zstd streams, see [this excellent package](https://github.com/SaveTheRbtz/zstd-seekable-format-go). + ## Installation Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`. @@ -78,6 +80,9 @@ of a stream. This is independent of the `WithEncoderConcurrency(n)`, but that is in the future. So if you want to limit concurrency for future updates, specify the concurrency you would like. +If you would like stream encoding to be done without spawning async goroutines, use `WithEncoderConcurrency(1)` +which will compress input as each block is completed, blocking on writes until each has completed. + You can specify your desired compression level using `WithEncoderLevel()` option. Currently only pre-defined compression settings can be specified. @@ -104,7 +109,8 @@ and seems to ignore concatenated streams, even though [it is part of the spec](h For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`. `EncodeAll` will encode all input in src and append it to dst. 
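As a quick illustration of the `EncodeAll` contract described above, a minimal round-trip sketch (illustrative only, not part of the vendored patch) could look like:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// One encoder/decoder pair can be created once and reused;
	// EncodeAll and DecodeAll are safe for concurrent use.
	enc, _ := zstd.NewWriter(nil) // nil writer: only EncodeAll is used
	defer enc.Close()
	dec, _ := zstd.NewReader(nil) // nil reader: only DecodeAll is used
	defer dec.Close()

	src := bytes.Repeat([]byte("hello zstd "), 100)
	compressed := enc.EncodeAll(src, nil) // append to a nil destination
	out, err := dec.DecodeAll(compressed, nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d -> %d bytes, roundtrip ok: %v\n", len(src), len(compressed), bytes.Equal(src, out))
}
```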
-This function can be called concurrently, but each call will only run on a single goroutine.
+This function can be called concurrently.
+Each call will run on the same goroutine as the caller.
 Encoded blocks can be concatenated and the result will be the combined input stream.
 Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`.
 
@@ -149,10 +155,10 @@ http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip
 
 This package:
 file out level insize outsize millis mb/s
-silesia.tar zskp 1 211947520 73101992 643 313.87
-silesia.tar zskp 2 211947520 67504318 969 208.38
-silesia.tar zskp 3 211947520 64595893 2007 100.68
-silesia.tar zskp 4 211947520 60995370 7691 26.28
+silesia.tar zskp 1 211947520 73821326 634 318.47
+silesia.tar zskp 2 211947520 67655404 1508 133.96
+silesia.tar zskp 3 211947520 64746933 3000 67.37
+silesia.tar zskp 4 211947520 60073508 16926 11.94
 
 cgo zstd:
 silesia.tar zstd 1 211947520 73605392 543 371.56
 silesia.tar zstd 6 211947520 62916450 1913 105.66
 silesia.tar zstd 9 211947520 60212393 5063 39.92
 
 gzip, stdlib/this package:
-silesia.tar gzstd 1 211947520 80007735 1654 122.21
-silesia.tar gzkp 1 211947520 80369488 1168 173.06
+silesia.tar gzstd 1 211947520 80007735 1498 134.87
+silesia.tar gzkp 1 211947520 80088272 1009 200.31
 
 GOB stream of binary data. Highly compressible.
 https://files.klauspost.com/compress/gob-stream.7z
 
 file out level insize outsize millis mb/s
-gob-stream zskp 1 1911399616 235022249 3088 590.30
-gob-stream zskp 2 1911399616 205669791 3786 481.34
-gob-stream zskp 3 1911399616 175034659 9636 189.17
-gob-stream zskp 4 1911399616 167273881 29337 62.13
+gob-stream zskp 1 1911399616 233948096 3230 564.34
+gob-stream zskp 2 1911399616 203997694 4997 364.73
+gob-stream zskp 3 1911399616 173526523 13435 135.68
+gob-stream zskp 4 1911399616 162195235 47559 38.33
+
 gob-stream zstd 1 1911399616 249810424 2637 691.26
 gob-stream zstd 3 1911399616 208192146 3490 522.31
 gob-stream zstd 6 1911399616 193632038 6687 272.56
 gob-stream zstd 9 1911399616 177620386 16175 112.70
-gob-stream gzstd 1 1911399616 357382641 10251 177.82
-gob-stream gzkp 1 1911399616 362156523 5695 320.08
+
+gob-stream gzstd 1 1911399616 357382013 9046 201.49
+gob-stream gzkp 1 1911399616 359136669 4885 373.08
 
 The test data for the Large Text Compression Benchmark is the first
 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006.
 http://mattmahoney.net/dc/textdata.html
 
 file out level insize outsize millis mb/s
-enwik9 zskp 1 1000000000 343848582 3609 264.18
-enwik9 zskp 2 1000000000 317276632 5746 165.97
-enwik9 zskp 3 1000000000 292243069 12162 78.41
-enwik9 zskp 4 1000000000 275241169 36430 26.18
+enwik9 zskp 1 1000000000 343833605 3687 258.64
+enwik9 zskp 2 1000000000 317001237 7672 124.29
+enwik9 zskp 3 1000000000 291915823 15923 59.89
+enwik9 zskp 4 1000000000 261710291 77697 12.27
+
 enwik9 zstd 1 1000000000 358072021 3110 306.65
 enwik9 zstd 3 1000000000 313734672 4784 199.35
 enwik9 zstd 6 1000000000 295138875 10290 92.68
 enwik9 zstd 9 1000000000 278348700 28549 33.40
-enwik9 gzstd 1 1000000000 382578136 9604 99.30
-enwik9 gzkp 1 1000000000 383825945 6544 145.73
+
+enwik9 gzstd 1 1000000000 382578136 8608 110.78
+enwik9 gzkp 1 1000000000 382781160 5628 169.45
 
 Highly compressible JSON file.
 https://files.klauspost.com/compress/github-june-2days-2019.json.zst
 
 file out level insize outsize millis mb/s
-github-june-2days-2019.json zskp 1 6273951764 699045015 10620 563.40
-github-june-2days-2019.json zskp 2 6273951764 617881763 11687 511.96
-github-june-2days-2019.json zskp 3 6273951764 524340691 34043 175.75
-github-june-2days-2019.json zskp 4 6273951764 503314661 93811 63.78
+github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17
+github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49
+github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41
+github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18
+
 github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00
 github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57
 github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18
 github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16
-github-june-2days-2019.json gzstd 1 6273951764 1164400847 29948 199.79
-github-june-2days-2019.json gzkp 1 6273951764 1128755542 19236 311.03
+
+github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32
+github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16
 
 VM Image, Linux mint with a few installed applications:
 https://files.klauspost.com/compress/rawstudio-mint14.7z
 
 file out level insize outsize millis mb/s
-rawstudio-mint14.tar zskp 1 8558382592 3667489370 20210 403.84
-rawstudio-mint14.tar zskp 2 8558382592 3364592300 31873 256.07
-rawstudio-mint14.tar zskp 3 8558382592 3158085214 77675 105.08
-rawstudio-mint14.tar zskp 4 8558382592 3020370044 404956 20.16
+rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29
+rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15
+rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49
+rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41
+
 rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27
 rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92
 rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77
 rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91
-rawstudio-mint14.tar gzstd 1 8558382592 3926257486 57722 141.40
-rawstudio-mint14.tar gzkp 1 8558382592 3970463184 41749 195.49
+
+rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96
+rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26
 
 CSV data:
 https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst
 
 file out level insize outsize millis mb/s
-nyc-taxi-data-10M.csv zskp 1 3325605752 641339945 8925 355.35
-nyc-taxi-data-10M.csv zskp 2 3325605752 591748091 11268 281.44
-nyc-taxi-data-10M.csv zskp 3 3325605752 530289687 25239 125.66
-nyc-taxi-data-10M.csv zskp 4 3325605752 490907191 65939 48.10
+nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17
+nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50
+nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79
+nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 22.98
+
 nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18
 nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07
 nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27
 nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12
-nyc-taxi-data-10M.csv gzstd 1 3325605752 928656485 23876 132.83
-nyc-taxi-data-10M.csv gzkp 1 3325605752 924718719 16388 193.53
+
+nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11
+nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68
 ```
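For stream compression (the mode measured in the tables above), a minimal sketch using the options discussed earlier could look like this; the level and concurrency values are illustrative choices, not the exact settings behind the benchmark rows:

```go
package main

import (
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

// compress streams src to dst as zstd. The level and concurrency used here
// are illustrative, not the settings used for the benchmarks above.
func compress(dst io.Writer, src io.Reader) error {
	w, err := zstd.NewWriter(dst,
		zstd.WithEncoderLevel(zstd.SpeedDefault),
		zstd.WithEncoderConcurrency(1), // synchronous: compress block by block
	)
	if err != nil {
		return err
	}
	if _, err := io.Copy(w, src); err != nil {
		w.Close() // best-effort cleanup; the copy error takes precedence
		return err
	}
	// Close flushes buffered data and writes the frame end.
	return w.Close()
}

func main() {
	if err := compress(os.Stdout, os.Stdin); err != nil {
		os.Exit(1)
	}
}
```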
 ## Decompressor
@@ -273,8 +289,13 @@ func Decompress(in io.Reader, out io.Writer) error {
 }
 ```
 
-It is important to use the "Close" function when you no longer need the Reader to stop running goroutines.
-See "Allocation-less operation" below.
+When running with default settings, it is important to use the "Close" function when you no longer need the Reader,
+so that its goroutines are stopped.
+Goroutines will exit once an error has been returned, including `io.EOF` at the end of a stream.
+
+Streams are decoded concurrently in 4 asynchronous stages to give the best possible throughput.
+However, if you prefer synchronous decompression, use `WithDecoderConcurrency(1)`, which will decompress data
+only as it is requested.
 
 For decoding buffers, it could look something like this:
 
@@ -283,7 +304,7 @@ import "github.com/klauspost/compress/zstd"
 
 // Create a reader that caches decompressors.
 // For this operation type we supply a nil Reader.
-var decoder, _ = zstd.NewReader(nil)
+var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
 
 // Decompress a buffer. We don't supply a destination buffer,
 // so it will be allocated by the decoder.
@@ -293,9 +314,12 @@ func Decompress(src []byte) ([]byte, error) {
 ```
 
 Both of these cases should provide the functionality needed.
-The decoder can be used for *concurrent* decompression of multiple buffers.
+The decoder can be used for *concurrent* decompression of multiple buffers.
+By default 4 decompressors will be created.
+
 It will only allow a certain number of concurrent operations to run.
-To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder.
+To tweak that yourself, use the `WithDecoderConcurrency(n)` option when creating the decoder.
+It is possible to use `WithDecoderConcurrency(0)` to create GOMAXPROCS decoders.
 
 ### Dictionaries
@@ -347,62 +371,48 @@ In this case no unneeded allocations should be made.
 
 The buffer decoder does everything on the same goroutine and does nothing concurrently.
 It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that.
 
-The stream decoder operates on
+The stream decoder will create goroutines for four stages:
 
-* One goroutine reads input and splits the input to several block decoders.
-* A number of decoders will decode blocks.
-* A goroutine coordinates these blocks and sends history from one to the next.
+1) Reading input and splitting it into blocks.
+2) Decompressing literals.
+3) Decompressing sequences.
+4) Reconstructing the output stream.
 
 So effectively this also means the decoder will "read ahead" and prepare data to always be available for output.
+The concurrency level will, for streams, determine how many blocks ahead the decompression will start.
+
 Since "blocks" are quite dependent on the output of the previous block, stream decoding will only have limited concurrency.
-In practice this means that concurrency is often limited to utilizing about 2 cores effectively.
-
-
+In practice this means that concurrency is often limited to utilizing about 3 cores effectively.
+
 ### Benchmarks
 
-These are some examples of performance compared to [datadog cgo library](https://github.com/DataDog/zstd).
-
 The first two are streaming decodes and the last are smaller inputs.
-
+
+Running on AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used.
+ ``` -BenchmarkDecoderSilesia-8 3 385000067 ns/op 550.51 MB/s 5498 B/op 8 allocs/op -BenchmarkDecoderSilesiaCgo-8 6 197666567 ns/op 1072.25 MB/s 270672 B/op 8 allocs/op - -BenchmarkDecoderEnwik9-8 1 2027001600 ns/op 493.34 MB/s 10496 B/op 18 allocs/op -BenchmarkDecoderEnwik9Cgo-8 2 979499200 ns/op 1020.93 MB/s 270672 B/op 8 allocs/op - -Concurrent performance: - -BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-16 28915 42469 ns/op 4340.07 MB/s 114 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-16 116505 9965 ns/op 11900.16 MB/s 16 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-16 8952 134272 ns/op 3588.70 MB/s 915 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-16 11820 102538 ns/op 4161.90 MB/s 594 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-16 34782 34184 ns/op 3661.88 MB/s 60 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-16 27712 43447 ns/op 3500.58 MB/s 99 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-16 62826 18750 ns/op 21845.10 MB/s 104 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-16 631545 1794 ns/op 57078.74 MB/s 2 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-16 1690140 712 ns/op 172938.13 MB/s 1 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-16 10432 113593 ns/op 6180.73 MB/s 1143 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/html.zst-16 113206 10671 ns/op 9596.27 MB/s 15 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-16 1530615 779 ns/op 5229.49 MB/s 0 B/op 0 allocs/op - -BenchmarkDecoder_DecodeAllParallelCgo/kppkn.gtb.zst-16 65217 16192 ns/op 11383.34 MB/s 46 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/geo.protodata.zst-16 292671 4039 ns/op 29363.19 MB/s 6 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/plrabn12.txt.zst-16 26314 46021 ns/op 10470.43 MB/s 293 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/lcet10.txt.zst-16 33897 34900 ns/op 12227.96 MB/s 205 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/asyoulik.txt.zst-16 104348 11433 ns/op 10949.01 MB/s 20 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/alice29.txt.zst-16 75949 15510 ns/op 9805.60 MB/s 32 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/html_x_4.zst-16 173910 6756 ns/op 60624.29 MB/s 37 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/paper-100k.pdf.zst-16 923076 1339 ns/op 76474.87 MB/s 1 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/fireworks.jpeg.zst-16 922920 1351 ns/op 91102.57 MB/s 2 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/urls.10K.zst-16 27649 43618 ns/op 16096.19 MB/s 407 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/html.zst-16 279073 4160 ns/op 24614.18 MB/s 6 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/comp-data.bin.zst-16 749938 1579 ns/op 2581.71 MB/s 0 B/op 0 allocs/op +BenchmarkDecoderSilesia-32 5 206878840 ns/op 1024.50 MB/s 49808 B/op 43 allocs/op +BenchmarkDecoderEnwik9-32 1 1271809000 ns/op 786.28 MB/s 72048 B/op 52 allocs/op + +Concurrent blocks, performance: + +BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32 67356 17857 ns/op 10321.96 MB/s 22.48 pct 102 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32 266656 4421 ns/op 26823.21 MB/s 11.89 pct 19 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32 20992 56842 ns/op 8477.17 MB/s 39.90 pct 754 B/op 0 allocs/op 
+BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32 27456 43932 ns/op 9714.01 MB/s 33.27 pct 524 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32 78432 15047 ns/op 8319.15 MB/s 40.34 pct 66 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32 65800 18436 ns/op 8249.63 MB/s 37.75 pct 88 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32 102993 11523 ns/op 35546.09 MB/s 3.637 pct 143 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32 1000000 1070 ns/op 95720.98 MB/s 80.53 pct 3 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32 749802 1752 ns/op 70272.35 MB/s 100.0 pct 5 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32 22640 52934 ns/op 13263.37 MB/s 26.25 pct 1014 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html.zst-32 226412 5232 ns/op 19572.27 MB/s 14.49 pct 20 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32 923041 1276 ns/op 3194.71 MB/s 31.26 pct 0 B/op 0 allocs/op ``` -This reflects the performance around May 2020, but this may be out of date. +This reflects the performance around May 2022, but this may be out of date. ## Zstd inside ZIP files diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go index 854458537..97299d499 100644 --- a/vendor/github.com/klauspost/compress/zstd/bitreader.go +++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go @@ -7,6 +7,7 @@ package zstd import ( "encoding/binary" "errors" + "fmt" "io" "math/bits" ) @@ -50,16 +51,16 @@ func (b *bitReader) getBits(n uint8) int { if n == 0 /*|| b.bitsRead >= 64 */ { return 0 } - return b.getBitsFast(n) + return int(b.get32BitsFast(n)) } -// getBitsFast requires that at least one bit is requested every time. +// get32BitsFast requires that at least one bit is requested every time. // There are no checks if the buffer is filled. -func (b *bitReader) getBitsFast(n uint8) int { +func (b *bitReader) get32BitsFast(n uint8) uint32 { const regMask = 64 - 1 v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) b.bitsRead += n - return int(v) + return v } // fillFast() will make sure at least 32 bits are available. @@ -125,6 +126,9 @@ func (b *bitReader) remain() uint { func (b *bitReader) close() error { // Release reference. b.in = nil + if !b.finished() { + return fmt.Errorf("%d extra bits on block, should be 0", b.remain()) + } if b.bitsRead > 64 { return io.ErrUnexpectedEOF } diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go index 303ae90f9..78b3c61be 100644 --- a/vendor/github.com/klauspost/compress/zstd/bitwriter.go +++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go @@ -5,8 +5,6 @@ package zstd -import "fmt" - // bitWriter will write bits. // First bit will be LSB of the first byte of output. type bitWriter struct { @@ -38,7 +36,7 @@ func (b *bitWriter) addBits16NC(value uint16, bits uint8) { b.nBits += bits } -// addBits32NC will add up to 32 bits. +// addBits32NC will add up to 31 bits. // It will not check if there is space for them, // so the caller must ensure that it has flushed recently. func (b *bitWriter) addBits32NC(value uint32, bits uint8) { @@ -46,6 +44,26 @@ func (b *bitWriter) addBits32NC(value uint32, bits uint8) { b.nBits += bits } +// addBits64NC will add up to 64 bits. +// There must be space for 32 bits. 
+func (b *bitWriter) addBits64NC(value uint64, bits uint8) { + if bits <= 31 { + b.addBits32Clean(uint32(value), bits) + return + } + b.addBits32Clean(uint32(value), 32) + b.flush32() + b.addBits32Clean(uint32(value>>32), bits-32) +} + +// addBits32Clean will add up to 32 bits. +// It will not check if there is space for them. +// The input must not contain more bits than specified. +func (b *bitWriter) addBits32Clean(value uint32, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + // addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. // It will not check if there is space for them, so the caller must ensure that it has flushed recently. func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { @@ -53,80 +71,6 @@ func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { b.nBits += bits } -// flush will flush all pending full bytes. -// There will be at least 56 bits available for writing when this has been called. -// Using flush32 is faster, but leaves less space for writing. -func (b *bitWriter) flush() { - v := b.nBits >> 3 - switch v { - case 0: - case 1: - b.out = append(b.out, - byte(b.bitContainer), - ) - case 2: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - ) - case 3: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - ) - case 4: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - ) - case 5: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - ) - case 6: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - ) - case 7: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - ) - case 8: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - byte(b.bitContainer>>56), - ) - default: - panic(fmt.Errorf("bits (%d) > 64", b.nBits)) - } - b.bitContainer >>= v << 3 - b.nBits &= 7 -} - // flush32 will flush out, so there are at least 32 bits available for writing. func (b *bitWriter) flush32() { if b.nBits < 32 { diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 6cea054d2..6b9929ddf 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -5,9 +5,13 @@ package zstd import ( + "bytes" + "encoding/binary" "errors" "fmt" "io" + "os" + "path/filepath" "sync" "github.com/klauspost/compress/huff0" @@ -38,14 +42,14 @@ const ( // maxCompressedBlockSize is the biggest allowed compressed block size (128KB) maxCompressedBlockSize = 128 << 10 + compressedBlockOverAlloc = 16 + maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc + // Maximum possible block size (all Raw+Uncompressed). 
maxBlockSize = (1 << 21) - 1 - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#literals_section_header - maxCompressedLiteralSize = 1 << 18 - maxRLELiteralSize = 1 << 20 - maxMatchLen = 131074 - maxSequences = 0x7f00 + 0xffff + maxMatchLen = 131074 + maxSequences = 0x7f00 + 0xffff // We support slightly less than the reference decoder to be able to // use ints on 32 bit archs. @@ -76,20 +80,28 @@ type blockDec struct { // Window size of the block. WindowSize uint64 - history chan *history - input chan struct{} - result chan decodeOutput - sequenceBuf []seq - err error - decWG sync.WaitGroup + err error + + // Check against this crc, if hasCRC is true. + checkCRC uint32 + hasCRC bool // Frame to use for singlethreaded decoding. // Should not be used by the decoder itself since parent may be another frame. localFrame *frameDec + sequence []seqVals + + async struct { + newHist *history + literals []byte + seqData []byte + seqSize int // Size of uncompressed sequences + fcs uint64 + } + // Block is RLE, this is the size. RLESize uint32 - tmp [4]byte Type blockType @@ -109,13 +121,8 @@ func (b *blockDec) String() string { func newBlockDec(lowMem bool) *blockDec { b := blockDec{ - lowMem: lowMem, - result: make(chan decodeOutput, 1), - input: make(chan struct{}, 1), - history: make(chan *history, 1), + lowMem: lowMem, } - b.decWG.Add(1) - go b.startDecoder() return &b } @@ -133,32 +140,50 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { b.Type = blockType((bh >> 1) & 3) // find size. cSize := int(bh >> 3) - maxSize := maxBlockSize + maxSize := maxCompressedBlockSizeAlloc switch b.Type { case blockTypeReserved: return ErrReservedBlockType case blockTypeRLE: + if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { + if debugDecoder { + printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrWindowSizeExceeded + } b.RLESize = uint32(cSize) if b.lowMem { maxSize = cSize } cSize = 1 case blockTypeCompressed: - if debug { + if debugDecoder { println("Data size on stream:", cSize) } b.RLESize = 0 - maxSize = maxCompressedBlockSize + maxSize = maxCompressedBlockSizeAlloc if windowSize < maxCompressedBlockSize && b.lowMem { - maxSize = int(windowSize) + maxSize = int(windowSize) + compressedBlockOverAlloc } if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize { - if debug { + if debugDecoder { printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b) } return ErrCompressedSizeTooBig } + // Empty compressed blocks must at least be 2 bytes + // for Literals_Block_Type and one for Sequences_Section_Header. + if cSize < 2 { + return ErrBlockTooSmall + } case blockTypeRaw: + if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { + if debugDecoder { + printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrWindowSizeExceeded + } + b.RLESize = 0 // We do not need a destination for raw blocks. maxSize = -1 @@ -168,10 +193,10 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { // Read block data. 
if cap(b.dataStorage) < cSize { - if b.lowMem { - b.dataStorage = make([]byte, 0, cSize) + if b.lowMem || cSize > maxCompressedBlockSize { + b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc) } else { - b.dataStorage = make([]byte, 0, maxBlockSize) + b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc) } } if cap(b.dst) <= maxSize { @@ -179,7 +204,7 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { } b.data, err = br.readBig(cSize, b.dataStorage) if err != nil { - if debug { + if debugDecoder { println("Reading block:", err, "(", cSize, ")", len(b.data)) printf("%T", br) } @@ -193,85 +218,14 @@ func (b *blockDec) sendErr(err error) { b.Last = true b.Type = blockTypeReserved b.err = err - b.input <- struct{}{} } // Close will release resources. // Closed blockDec cannot be reset. func (b *blockDec) Close() { - close(b.input) - close(b.history) - close(b.result) - b.decWG.Wait() -} - -// decodeAsync will prepare decoding the block when it receives input. -// This will separate output and history. -func (b *blockDec) startDecoder() { - defer b.decWG.Done() - for range b.input { - //println("blockDec: Got block input") - switch b.Type { - case blockTypeRLE: - if cap(b.dst) < int(b.RLESize) { - if b.lowMem { - b.dst = make([]byte, b.RLESize) - } else { - b.dst = make([]byte, maxBlockSize) - } - } - o := decodeOutput{ - d: b, - b: b.dst[:b.RLESize], - err: nil, - } - v := b.data[0] - for i := range o.b { - o.b[i] = v - } - hist := <-b.history - hist.append(o.b) - b.result <- o - case blockTypeRaw: - o := decodeOutput{ - d: b, - b: b.data, - err: nil, - } - hist := <-b.history - hist.append(o.b) - b.result <- o - case blockTypeCompressed: - b.dst = b.dst[:0] - err := b.decodeCompressed(nil) - o := decodeOutput{ - d: b, - b: b.dst, - err: err, - } - if debug { - println("Decompressed to", len(b.dst), "bytes, error:", err) - } - b.result <- o - case blockTypeReserved: - // Used for returning errors. - <-b.history - b.result <- decodeOutput{ - d: b, - b: nil, - err: b.err, - } - default: - panic("Invalid block type") - } - if debug { - println("blockDec: Finished block") - } - } } -// decodeAsync will prepare decoding the block when it receives the history. -// If history is provided, it will not fetch it from the channel. +// decodeBuf func (b *blockDec) decodeBuf(hist *history) error { switch b.Type { case blockTypeRLE: @@ -279,7 +233,7 @@ func (b *blockDec) decodeBuf(hist *history) error { if b.lowMem { b.dst = make([]byte, b.RLESize) } else { - b.dst = make([]byte, maxBlockSize) + b.dst = make([]byte, maxCompressedBlockSize) } } b.dst = b.dst[:b.RLESize] @@ -294,14 +248,23 @@ func (b *blockDec) decodeBuf(hist *history) error { return nil case blockTypeCompressed: saved := b.dst - b.dst = hist.b - hist.b = nil + // Append directly to history + if hist.ignoreBuffer == 0 { + b.dst = hist.b + hist.b = nil + } else { + b.dst = b.dst[:0] + } err := b.decodeCompressed(hist) - if debug { + if debugDecoder { println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) } - hist.b = b.dst - b.dst = saved + if hist.ignoreBuffer == 0 { + hist.b = b.dst + b.dst = saved + } else { + hist.appendKeep(b.dst) + } return err case blockTypeReserved: // Used for returning errors. @@ -311,30 +274,18 @@ func (b *blockDec) decodeBuf(hist *history) error { } } -// decodeCompressed will start decompressing a block. 
-// If no history is supplied the decoder will decodeAsync as much as possible -// before fetching from blockDec.history -func (b *blockDec) decodeCompressed(hist *history) error { - in := b.data - delayedHistory := hist == nil - - if delayedHistory { - // We must always grab history. - defer func() { - if hist == nil { - <-b.history - } - }() - } +func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) { // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header if len(in) < 2 { - return ErrBlockTooSmall + return in, ErrBlockTooSmall } + litType := literalsBlockType(in[0] & 3) var litRegenSize int var litCompSize int sizeFormat := (in[0] >> 2) & 3 var fourStreams bool + var literals []byte switch litType { case literalsBlockRaw, literalsBlockRLE: switch sizeFormat { @@ -350,7 +301,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes. if len(in) < 3 { println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12) in = in[3:] @@ -361,7 +312,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023). if len(in) < 3 { println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) litRegenSize = int(n & 1023) @@ -372,7 +323,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { fourStreams = true if len(in) < 4 { println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) litRegenSize = int(n & 16383) @@ -382,7 +333,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { fourStreams = true if len(in) < 5 { println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) litRegenSize = int(n & 262143) @@ -390,16 +341,18 @@ func (b *blockDec) decodeCompressed(hist *history) error { in = in[5:] } } - if debug { + if debugDecoder { println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) } - var literals []byte - var huff *huff0.Scratch + if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize { + return in, ErrWindowSizeExceeded + } + switch litType { case literalsBlockRaw: if len(in) < litRegenSize { println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } literals = in[:litRegenSize] in = in[litRegenSize:] @@ -407,19 +360,13 @@ func (b *blockDec) decodeCompressed(hist *history) error { case literalsBlockRLE: if len(in) < 1 { println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } if cap(b.literalBuf) < litRegenSize { if b.lowMem { - b.literalBuf = make([]byte, litRegenSize) + b.literalBuf = make([]byte, 
litRegenSize, litRegenSize+compressedBlockOverAlloc) } else { - if litRegenSize > maxCompressedLiteralSize { - // Exceptional - b.literalBuf = make([]byte, litRegenSize) - } else { - b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize) - - } + b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc) } } literals = b.literalBuf[:litRegenSize] @@ -428,45 +375,79 @@ func (b *blockDec) decodeCompressed(hist *history) error { literals[i] = v } in = in[1:] - if debug { + if debugDecoder { printf("Found %d RLE compressed literals\n", litRegenSize) } case literalsBlockTreeless: if len(in) < litCompSize { println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } // Store compressed literals, so we defer decoding until we get history. literals = in[:litCompSize] in = in[litCompSize:] - if debug { + if debugDecoder { printf("Found %d compressed literals\n", litCompSize) } + huff := hist.huffTree + if huff == nil { + return in, errors.New("literal block was treeless, but no history was defined") + } + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + var err error + // Use our out buffer. + huff.MaxDecodedSize = litRegenSize + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + // Make sure we don't leak our literals buffer + if err != nil { + println("decompressing literals:", err) + return in, err + } + if len(literals) != litRegenSize { + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + case literalsBlockCompressed: if len(in) < litCompSize { println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } literals = in[:litCompSize] in = in[litCompSize:] - huff = huffDecoderPool.Get().(*huff0.Scratch) - var err error // Ensure we have space to store it. if cap(b.literalBuf) < litRegenSize { if b.lowMem { - b.literalBuf = make([]byte, 0, litRegenSize) + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) } else { - b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) } } - if huff == nil { - huff = &huff0.Scratch{} + huff := hist.huffTree + if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) { + huff = huffDecoderPool.Get().(*huff0.Scratch) + if huff == nil { + huff = &huff0.Scratch{} + } } + var err error huff, literals, err = huff0.ReadTable(literals, huff) if err != nil { println("reading huffman table:", err) - return err + return in, err } + hist.huffTree = huff + huff.MaxDecodedSize = litRegenSize // Use our out buffer. 
if fourStreams { literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) @@ -475,27 +456,63 @@ func (b *blockDec) decodeCompressed(hist *history) error { } if err != nil { println("decoding compressed literals:", err) - return err + return in, err } // Make sure we don't leak our literals buffer if len(literals) != litRegenSize { - return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) } - if debug { + // Re-cap to get extra size. + literals = b.literalBuf[:len(literals)] + if debugDecoder { printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) } } + hist.decoders.literals = literals + return in, nil +} + +// decodeCompressed will start decompressing a block. +func (b *blockDec) decodeCompressed(hist *history) error { + in := b.data + in, err := b.decodeLiterals(in, hist) + if err != nil { + return err + } + err = b.prepareSequences(in, hist) + if err != nil { + return err + } + if hist.decoders.nSeqs == 0 { + b.dst = append(b.dst, hist.decoders.literals...) + return nil + } + before := len(hist.decoders.out) + err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:]) + if err != nil { + return err + } + if hist.decoders.maxSyncLen > 0 { + hist.decoders.maxSyncLen += uint64(before) + hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out)) + } + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset + return nil +} +func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { + if debugDecoder { + printf("prepareSequences: %d byte(s) input\n", len(in)) + } // Decode Sequences // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section if len(in) < 1 { return ErrBlockTooSmall } + var nSeqs int seqHeader := in[0] - nSeqs := 0 switch { - case seqHeader == 0: - in = in[1:] case seqHeader < 128: nSeqs = int(seqHeader) in = in[1:] @@ -512,19 +529,16 @@ func (b *blockDec) decodeCompressed(hist *history) error { nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) in = in[3:] } - // Allocate sequences - if cap(b.sequenceBuf) < nSeqs { - if b.lowMem { - b.sequenceBuf = make([]seq, nSeqs) - } else { - // Allocate max - b.sequenceBuf = make([]seq, nSeqs, maxSequences) + if nSeqs == 0 && len(in) != 0 { + // When no sequences, there should not be any more data... 
+ if debugDecoder { + printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in)) } - } else { - // Reuse buffer - b.sequenceBuf = b.sequenceBuf[:nSeqs] + return ErrUnexpectedBlockSize } - var seqs = &sequenceDecs{} + + var seqs = &hist.decoders + seqs.nSeqs = nSeqs if nSeqs > 0 { if len(in) < 1 { return ErrBlockTooSmall @@ -532,12 +546,12 @@ func (b *blockDec) decodeCompressed(hist *history) error { br := byteReader{b: in, off: 0} compMode := br.Uint8() br.advance(1) - if debug { + if debugDecoder { printf("Compression modes: 0b%b", compMode) } for i := uint(0); i < 3; i++ { mode := seqCompMode((compMode >> (6 - i*2)) & 3) - if debug { + if debugDecoder { println("Table", tableIndex(i), "is", mode) } var seq *sequenceDec @@ -553,6 +567,9 @@ func (b *blockDec) decodeCompressed(hist *history) error { } switch mode { case compModePredefined: + if seq.fse != nil && !seq.fse.preDefined { + fseDecoderPool.Put(seq.fse) + } seq.fse = &fsePredef[i] case compModeRLE: if br.remain() < 1 { @@ -560,34 +577,36 @@ func (b *blockDec) decodeCompressed(hist *history) error { } v := br.Uint8() br.advance(1) - dec := fseDecoderPool.Get().(*fseDecoder) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } symb, err := decSymbolValue(v, symbolTableX[i]) if err != nil { printf("RLE Transform table (%v) error: %v", tableIndex(i), err) return err } - dec.setRLE(symb) - seq.fse = dec - if debug { + seq.fse.setRLE(symb) + if debugDecoder { printf("RLE set to %+v, code: %v", symb, v) } case compModeFSE: println("Reading table for", tableIndex(i)) - dec := fseDecoderPool.Get().(*fseDecoder) - err := dec.readNCount(&br, uint16(maxTableSymbol[i])) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } + err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i])) if err != nil { println("Read table error:", err) return err } - err = dec.transform(symbolTableX[i]) + err = seq.fse.transform(symbolTableX[i]) if err != nil { println("Transform table error:", err) return err } - if debug { - println("Read table ok", "symbolLen:", dec.symbolLen) + if debugDecoder { + println("Read table ok", "symbolLen:", seq.fse.symbolLen) } - seq.fse = dec case compModeRepeat: seq.repeat = true } @@ -597,140 +616,106 @@ func (b *blockDec) decodeCompressed(hist *history) error { } in = br.unread() } - - // Wait for history. - // All time spent after this is critical since it is strictly sequential. - if hist == nil { - hist = <-b.history - if hist.error { - return ErrDecoderClosed - } + if debugDecoder { + println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.") } - // Decode treeless literal block. - if litType == literalsBlockTreeless { - // TODO: We could send the history early WITHOUT the stream history. - // This would allow decoding treeless literals before the byte history is available. - // Silencia stats: Treeless 4393, with: 32775, total: 37168, 11% treeless. - // So not much obvious gain here. - - if hist.huffTree == nil { - return errors.New("literal block was treeless, but no history was defined") - } - // Ensure we have space to store it. - if cap(b.literalBuf) < litRegenSize { - if b.lowMem { - b.literalBuf = make([]byte, 0, litRegenSize) - } else { - b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) - } - } - var err error - // Use our out buffer. 
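The `nSeqs` switch in `prepareSequences` above follows the Sequences_Section_Header encoding from RFC 8878. A standalone sketch of the same three size formats; `readNumSequences` is a hypothetical helper, not part of the package:

```go
package main

import (
	"errors"
	"fmt"
)

// readNumSequences parses the Number_of_Sequences field at the start of a
// zstd Sequences section and returns the count plus the bytes consumed.
func readNumSequences(in []byte) (nSeqs, n int, err error) {
	if len(in) < 1 {
		return 0, 0, errors.New("block too small")
	}
	b0 := int(in[0])
	switch {
	case b0 < 128: // one byte: the count itself
		return b0, 1, nil
	case b0 < 255: // two bytes
		if len(in) < 2 {
			return 0, 0, errors.New("block too small")
		}
		return (b0-128)<<8 + int(in[1]), 2, nil
	default: // b0 == 255: three bytes, offset by 0x7F00
		if len(in) < 3 {
			return 0, 0, errors.New("block too small")
		}
		return 0x7F00 + int(in[1]) + int(in[2])<<8, 3, nil
	}
}

func main() {
	fmt.Println(readNumSequences([]byte{0x05}))             // 5
	fmt.Println(readNumSequences([]byte{0x81, 0x02}))       // 258
	fmt.Println(readNumSequences([]byte{0xFF, 0x01, 0x01})) // 32769
}
```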
- huff = hist.huffTree - if fourStreams { - literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) - } else { - literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) - } - // Make sure we don't leak our literals buffer - if err != nil { - println("decompressing literals:", err) - return err - } - if len(literals) != litRegenSize { - return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) - } - } else { - if hist.huffTree != nil && huff != nil { - if hist.dict == nil || hist.dict.litEnc != hist.huffTree { - huffDecoderPool.Put(hist.huffTree) - } - hist.huffTree = nil + if nSeqs == 0 { + if len(b.sequence) > 0 { + b.sequence = b.sequence[:0] } + return nil } - if huff != nil { - hist.huffTree = huff + br := seqs.br + if br == nil { + br = &bitReader{} } - if debug { - println("Final literals:", len(literals), "hash:", xxhash.Sum64(literals), "and", nSeqs, "sequences.") + if err := br.init(in); err != nil { + return err } - if nSeqs == 0 { - // Decompressed content is defined entirely as Literals Section content. - b.dst = append(b.dst, literals...) - if delayedHistory { - hist.append(literals) + if err := seqs.initialize(br, hist, b.dst); err != nil { + println("initializing sequences:", err) + return err + } + // Extract blocks... + if false && hist.dict == nil { + fatalErr := func(err error) { + if err != nil { + panic(err) + } } - return nil + fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize) + var buf bytes.Buffer + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse)) + buf.Write(in) + os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm) } - seqs, err := seqs.mergeHistory(&hist.decoders) - if err != nil { - return err - } - if debug { - println("History merged ok") + return nil +} + +func (b *blockDec) decodeSequences(hist *history) error { + if cap(b.sequence) < hist.decoders.nSeqs { + if b.lowMem { + b.sequence = make([]seqVals, 0, hist.decoders.nSeqs) + } else { + b.sequence = make([]seqVals, 0, 0x7F00+0xffff) + } } - br := &bitReader{} - if err := br.init(in); err != nil { - return err + b.sequence = b.sequence[:hist.decoders.nSeqs] + if hist.decoders.nSeqs == 0 { + hist.decoders.seqSize = len(hist.decoders.literals) + return nil } + hist.decoders.windowSize = hist.windowSize + hist.decoders.prevOffset = hist.recentOffsets - // TODO: Investigate if sending history without decoders are faster. - // This would allow the sequences to be decoded async and only have to construct stream history. - // If only recent offsets were not transferred, this would be an obvious win. - // Also, if first 3 sequences don't reference recent offsets, all sequences can be decoded. + err := hist.decoders.decode(b.sequence) + hist.recentOffsets = hist.decoders.prevOffset + return err +} +func (b *blockDec) executeSequences(hist *history) error { hbytes := hist.b if len(hbytes) > hist.windowSize { hbytes = hbytes[len(hbytes)-hist.windowSize:] - // We do not need history any more. + // We do not need history anymore. 
if hist.dict != nil { hist.dict.content = nil } } - - if err := seqs.initialize(br, hist, literals, b.dst); err != nil { - println("initializing sequences:", err) - return err - } - - err = seqs.decode(nSeqs, br, hbytes) + hist.decoders.windowSize = hist.windowSize + hist.decoders.out = b.dst[:0] + err := hist.decoders.execute(b.sequence, hbytes) if err != nil { return err } - if !br.finished() { - return fmt.Errorf("%d extra bits on block, should be 0", br.remain()) - } + return b.updateHistory(hist) +} - err = br.close() - if err != nil { - printf("Closing sequences: %v, %+v\n", err, *br) - } +func (b *blockDec) updateHistory(hist *history) error { if len(b.data) > maxCompressedBlockSize { return fmt.Errorf("compressed block size too large (%d)", len(b.data)) } // Set output and release references. - b.dst = seqs.out - seqs.out, seqs.literals, seqs.hist = nil, nil, nil + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset - if !delayedHistory { - // If we don't have delayed history, no need to update. - hist.recentOffsets = seqs.prevOffset - return nil - } if b.Last { // if last block we don't care about history. println("Last block, no history returned") hist.b = hist.b[:0] return nil + } else { + hist.append(b.dst) + if debugDecoder { + println("Finished block with ", len(b.sequence), "sequences. Added", len(b.dst), "to history, now length", len(hist.b)) + } } - hist.append(b.dst) - hist.recentOffsets = seqs.prevOffset - if debug { - println("Finished block with literals:", len(literals), "and", nSeqs, "sequences.") - } + hist.decoders.out, hist.decoders.literals = nil, nil return nil } diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go index e1be092f3..12e8f6f0b 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -51,7 +51,7 @@ func (b *blockEnc) init() { if cap(b.literals) < maxCompressedBlockSize { b.literals = make([]byte, 0, maxCompressedBlockSize) } - const defSeqs = 200 + const defSeqs = 2000 if cap(b.sequences) < defSeqs { b.sequences = make([]seq, 0, defSeqs) } @@ -156,7 +156,7 @@ func (h *literalsHeader) setSize(regenLen int) { switch { case inBits < 5: lh |= (uint64(regenLen) << 3) | (1 << 60) - if debug { + if debugEncoder { got := int(lh>>3) & 0xff if got != regenLen { panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)")) @@ -184,7 +184,7 @@ func (h *literalsHeader) setSizes(compLen, inLen int, single bool) { lh |= 1 << 2 } lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60) - if debug { + if debugEncoder { const mmask = (1 << 24) - 1 n := (lh >> 4) & mmask if int(n&1023) != inLen { @@ -312,7 +312,7 @@ func (b *blockEnc) encodeRaw(a []byte) { bh.setType(blockTypeRaw) b.output = bh.appendTo(b.output[:0]) b.output = append(b.output, a...) - if debug { + if debugEncoder { println("Adding RAW block, length", len(a), "last:", b.last) } } @@ -325,7 +325,7 @@ func (b *blockEnc) encodeRawTo(dst, src []byte) []byte { bh.setType(blockTypeRaw) dst = bh.appendTo(dst) dst = append(dst, src...) 
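`executeSequences` trims the history to `hist.windowSize` before executing, and `updateHistory` appends fresh output back into it. A toy analogue of that sliding window, assuming nothing beyond what the hunks show; the real `history` type does considerably more:

```go
package main

import "fmt"

// history is a toy stand-in for the decoder's window buffer: it appends
// decoded output but never keeps more than windowSize bytes of it.
type history struct {
	b          []byte
	windowSize int
}

func (h *history) append(out []byte) {
	if len(out) >= h.windowSize {
		// The new output alone fills the window; keep only its tail.
		h.b = append(h.b[:0], out[len(out)-h.windowSize:]...)
		return
	}
	h.b = append(h.b, out...)
	if len(h.b) > h.windowSize {
		// Slide: drop the oldest bytes in place.
		copy(h.b, h.b[len(h.b)-h.windowSize:])
		h.b = h.b[:h.windowSize]
	}
}

func main() {
	h := history{windowSize: 4}
	h.append([]byte("abc"))
	h.append([]byte("de"))
	fmt.Printf("%s\n", h.b) // "bcde": only the last 4 bytes survive
}
```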
- if debug { + if debugEncoder { println("Adding RAW block, length", len(src), "last:", b.last) } return dst @@ -339,7 +339,7 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error { // Don't compress extremely small blocks if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw { - if debug { + if debugEncoder { println("Adding RAW block, length", len(lits), "last:", b.last) } bh.setType(blockTypeRaw) @@ -371,7 +371,7 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error { switch err { case huff0.ErrIncompressible: - if debug { + if debugEncoder { println("Adding RAW block, length", len(lits), "last:", b.last) } bh.setType(blockTypeRaw) @@ -379,7 +379,7 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error { b.output = append(b.output, lits...) return nil case huff0.ErrUseRLE: - if debug { + if debugEncoder { println("Adding RLE block, length", len(lits)) } bh.setType(blockTypeRLE) @@ -396,12 +396,12 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error { bh.setType(blockTypeCompressed) var lh literalsHeader if reUsed { - if debug { + if debugEncoder { println("Reused tree, compressed to", len(out)) } lh.setType(literalsBlockTreeless) } else { - if debug { + if debugEncoder { println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable)) } lh.setType(literalsBlockCompressed) @@ -426,7 +426,7 @@ func fuzzFseEncoder(data []byte) int { return 0 } enc := fseEncoder{} - hist := enc.Histogram()[:256] + hist := enc.Histogram() maxSym := uint8(0) for i, v := range data { v = v & 63 @@ -517,7 +517,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { lh.setSize(len(b.literals)) b.output = lh.appendTo(b.output) b.output = append(b.output, b.literals...) - if debug { + if debugEncoder { println("Adding literals RAW, length", len(b.literals)) } case huff0.ErrUseRLE: @@ -525,22 +525,22 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { lh.setSize(len(b.literals)) b.output = lh.appendTo(b.output) b.output = append(b.output, b.literals[0]) - if debug { + if debugEncoder { println("Adding literals RLE") } case nil: // Compressed litLen... if reUsed { - if debug { + if debugEncoder { println("reused tree") } lh.setType(literalsBlockTreeless) } else { - if debug { + if debugEncoder { println("new tree, size:", len(b.litEnc.OutTable)) } lh.setType(literalsBlockCompressed) - if debug { + if debugEncoder { _, _, err := huff0.ReadTable(out, nil) if err != nil { panic(err) @@ -548,18 +548,18 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { } } lh.setSizes(len(out), len(b.literals), single) - if debug { + if debugEncoder { printf("Compressed %d literals to %d bytes", len(b.literals), len(out)) println("Adding literal header:", lh) } b.output = lh.appendTo(b.output) b.output = append(b.output, out...) 
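The `encodeLits` branches above hinge on two huff0 sentinel errors. A usage sketch of that API as vendored here, assuming the `Compress1X` signature from the imported huff0 package:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/huff0"
)

func main() {
	var s huff0.Scratch
	in := bytes.Repeat([]byte("zstd literals "), 32) // 448 compressible bytes

	out, reUsed, err := huff0.Compress1X(in, &s)
	switch err {
	case huff0.ErrIncompressible:
		fmt.Println("emit a raw literals block")
	case huff0.ErrUseRLE:
		fmt.Println("emit an RLE literals block")
	case nil:
		fmt.Println("compressed", len(in), "->", len(out), "bytes, table reused:", reUsed)
	default:
		panic(err)
	}
}
```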
b.litEnc.Reuse = huff0.ReusePolicyAllow - if debug { + if debugEncoder { println("Adding literals compressed") } default: - if debug { + if debugEncoder { println("Adding literals ERROR:", err) } return err @@ -577,7 +577,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { n := len(b.sequences) - 0x7f00 b.output = append(b.output, 255, uint8(n), uint8(n>>8)) } - if debug { + if debugEncoder { println("Encoding", len(b.sequences), "sequences") } b.genCodes() @@ -611,17 +611,17 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { nSize = nSize + (nSize+2*8*16)>>4 switch { case predefSize <= prevSize && predefSize <= nSize || forcePreDef: - if debug { + if debugEncoder { println("Using predefined", predefSize>>3, "<=", nSize>>3) } return preDef, compModePredefined case prevSize <= nSize: - if debug { + if debugEncoder { println("Using previous", prevSize>>3, "<=", nSize>>3) } return prev, compModeRepeat default: - if debug { + if debugEncoder { println("Using new, predef", predefSize>>3, ". previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes") println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen]) } @@ -634,7 +634,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { if llEnc.useRLE { mode |= uint8(compModeRLE) << 6 llEnc.setRLE(b.sequences[0].llCode) - if debug { + if debugEncoder { println("llEnc.useRLE") } } else { @@ -645,7 +645,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { if ofEnc.useRLE { mode |= uint8(compModeRLE) << 4 ofEnc.setRLE(b.sequences[0].ofCode) - if debug { + if debugEncoder { println("ofEnc.useRLE") } } else { @@ -657,7 +657,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { if mlEnc.useRLE { mode |= uint8(compModeRLE) << 2 mlEnc.setRLE(b.sequences[0].mlCode) - if debug { + if debugEncoder { println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen) } } else { @@ -666,7 +666,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { mode |= uint8(m) << 2 } b.output = append(b.output, mode) - if debug { + if debugEncoder { printf("Compression modes: 0b%b", mode) } b.output, err = llEnc.writeCount(b.output) @@ -722,52 +722,53 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB) } seq-- - if llEnc.maxBits+mlEnc.maxBits+ofEnc.maxBits <= 32 { - // No need to flush (common) - for seq >= 0 { - s = b.sequences[seq] - wr.flush32() - llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] - // tabelog max is 8 for all. - of.encode(ofB) - ml.encode(mlB) - ll.encode(llB) - wr.flush32() - - // We checked that all can stay within 32 bits - wr.addBits32NC(s.litLen, llB.outBits) - wr.addBits32NC(s.matchLen, mlB.outBits) - wr.addBits32NC(s.offset, ofB.outBits) - - if debugSequences { - println("Encoded seq", seq, s) - } - - seq-- - } - } else { - for seq >= 0 { - s = b.sequences[seq] - wr.flush32() - llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] - // tabelog max is below 8 for each. 
-			of.encode(ofB)
-			ml.encode(mlB)
-			ll.encode(llB)
-			wr.flush32()
-
-			// ml+ll = max 32 bits total
-			wr.addBits32NC(s.litLen, llB.outBits)
-			wr.addBits32NC(s.matchLen, mlB.outBits)
-			wr.flush32()
-			wr.addBits32NC(s.offset, ofB.outBits)
-
-			if debugSequences {
-				println("Encoded seq", seq, s)
-			}
-
-			seq--
-		}
+		// Store sequences in reverse...
+		for seq >= 0 {
+			s = b.sequences[seq]
+
+			ofB := ofTT[s.ofCode]
+			wr.flush32() // tablelog max is below 8 for each, so it will fill max 24 bits.
+			//of.encode(ofB)
+			nbBitsOut := (uint32(of.state) + ofB.deltaNbBits) >> 16
+			dstState := int32(of.state>>(nbBitsOut&15)) + int32(ofB.deltaFindState)
+			wr.addBits16NC(of.state, uint8(nbBitsOut))
+			of.state = of.stateTable[dstState]
+
+			// Accumulate extra bits.
+			outBits := ofB.outBits & 31
+			extraBits := uint64(s.offset & bitMask32[outBits])
+			extraBitsN := outBits
+
+			mlB := mlTT[s.mlCode]
+			//ml.encode(mlB)
+			nbBitsOut = (uint32(ml.state) + mlB.deltaNbBits) >> 16
+			dstState = int32(ml.state>>(nbBitsOut&15)) + int32(mlB.deltaFindState)
+			wr.addBits16NC(ml.state, uint8(nbBitsOut))
+			ml.state = ml.stateTable[dstState]
+
+			outBits = mlB.outBits & 31
+			extraBits = extraBits<<outBits | uint64(s.matchLen&bitMask32[outBits])
+			extraBitsN += outBits
+
+			llB := llTT[s.llCode]
+			//ll.encode(llB)
+			nbBitsOut = (uint32(ll.state) + llB.deltaNbBits) >> 16
+			dstState = int32(ll.state>>(nbBitsOut&15)) + int32(llB.deltaFindState)
+			wr.addBits16NC(ll.state, uint8(nbBitsOut))
+			ll.state = ll.stateTable[dstState]
+
+			outBits = llB.outBits & 31
+			extraBits = extraBits<<outBits | uint64(s.litLen&bitMask32[outBits])
+			extraBitsN += outBits
+
+			wr.flush32()
+			wr.addBits64NC(extraBits, extraBitsN)
+
+			if debugSequences {
+				println("Encoded seq", seq, s)
+			}
+
+			seq--
+		}
 	}
@@ ... @@ func (b *blockEnc) genCodes() {
 	if len(b.sequences) > math.MaxUint16 {
 		panic("can only encode up to 64K sequences")
 	}
 	// No bounds checks after here:
-	llH := b.coders.llEnc.Histogram()[:256]
-	ofH := b.coders.ofEnc.Histogram()[:256]
-	mlH := b.coders.mlEnc.Histogram()[:256]
+	llH := b.coders.llEnc.Histogram()
+	ofH := b.coders.ofEnc.Histogram()
+	mlH := b.coders.mlEnc.Histogram()
 	for i := range llH {
 		llH[i] = 0
 	}
@@ -820,7 +820,8 @@ func (b *blockEnc) genCodes() {
 	}
 	var llMax, ofMax, mlMax uint8
-	for i, seq := range b.sequences {
+	for i := range b.sequences {
+		seq := &b.sequences[i]
 		v := llCode(seq.litLen)
 		seq.llCode = v
 		llH[v]++
@@ -844,7 +845,6 @@ func (b *blockEnc) genCodes() {
 			panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen))
 		}
 	}
-	b.sequences[i] = seq
 	}
 	maxCount := func(a []uint32) int {
 		var max uint32
diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
index 17e820a6a..176788f25 100644
--- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go
+++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
@@ -7,7 +7,6 @@ package zstd
 import (
 	"fmt"
 	"io"
-	"io/ioutil"
 )
 
 type byteBuffer interface {
@@ -23,7 +22,7 @@ type byteBuffer interface {
 	readByte() (byte, error)
 
 	// Skip n bytes.
- skipN(n int) error + skipN(n int64) error } // in-memory buffer @@ -52,10 +51,6 @@ func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { return r, nil } -func (b *byteBuf) remain() []byte { - return *b -} - func (b *byteBuf) readByte() (byte, error) { bb := *b if len(bb) < 1 { @@ -66,9 +61,12 @@ func (b *byteBuf) readByte() (byte, error) { return r, nil } -func (b *byteBuf) skipN(n int) error { +func (b *byteBuf) skipN(n int64) error { bb := *b - if len(bb) < n { + if n < 0 { + return fmt.Errorf("negative skip (%d) requested", n) + } + if int64(len(bb)) < n { return io.ErrUnexpectedEOF } *b = bb[n:] @@ -91,7 +89,7 @@ func (r *readerWrapper) readSmall(n int) ([]byte, error) { if err == io.EOF { return nil, io.ErrUnexpectedEOF } - if debug { + if debugDecoder { println("readSmall: got", n2, "want", n, "err", err) } return nil, err @@ -113,6 +111,9 @@ func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { func (r *readerWrapper) readByte() (byte, error) { n2, err := r.r.Read(r.tmp[:1]) if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } return 0, err } if n2 != 1 { @@ -121,9 +122,9 @@ func (r *readerWrapper) readByte() (byte, error) { return r.tmp[0], nil } -func (r *readerWrapper) skipN(n int) error { - n2, err := io.CopyN(ioutil.Discard, r.r, int64(n)) - if n2 != int64(n) { +func (r *readerWrapper) skipN(n int64) error { + n2, err := io.CopyN(io.Discard, r.r, n) + if n2 != n { err = io.ErrUnexpectedEOF } return err diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go index 2c4fca17f..0e59a242d 100644 --- a/vendor/github.com/klauspost/compress/zstd/bytereader.go +++ b/vendor/github.com/klauspost/compress/zstd/bytereader.go @@ -13,12 +13,6 @@ type byteReader struct { off int } -// init will initialize the reader and set the input. -func (b *byteReader) init(in []byte) { - b.b = in - b.off = 0 -} - // advance the stream b n bytes. func (b *byteReader) advance(n uint) { b.off += int(n) diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go index 69736e8d4..f6a240970 100644 --- a/vendor/github.com/klauspost/compress/zstd/decodeheader.go +++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go @@ -4,7 +4,7 @@ package zstd import ( - "bytes" + "encoding/binary" "errors" "io" ) @@ -15,18 +15,50 @@ const HeaderMaxSize = 14 + 3 // Header contains information about the first frame and block within that. type Header struct { - // Window Size the window of data to keep while decoding. - // Will only be set if HasFCS is false. - WindowSize uint64 + // SingleSegment specifies whether the data is to be decompressed into a + // single contiguous memory segment. + // It implies that WindowSize is invalid and that FrameContentSize is valid. + SingleSegment bool - // Frame content size. - // Expected size of the entire frame. - FrameContentSize uint64 + // WindowSize is the window of data to keep while decoding. + // Will only be set if SingleSegment is false. + WindowSize uint64 // Dictionary ID. // If 0, no dictionary. DictionaryID uint32 + // HasFCS specifies whether FrameContentSize has a valid value. + HasFCS bool + + // FrameContentSize is the expected uncompressed size of the entire frame. + FrameContentSize uint64 + + // Skippable will be true if the frame is meant to be skipped. + // This implies that FirstBlock.OK is false. 
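`skipN` moving from `int` to `int64` (together with the `ioutil.Discard` to `io.Discard` swap) lets callers skip payloads whose 32-bit size field does not fit a 32-bit `int`. A sketch of the same pattern over a plain `io.Reader`; `skipN` here is a free function, not the method from the patch:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// skipN discards n bytes from r, the way readerWrapper.skipN does after
// this change: an int64 count, so large frames work on 32-bit builds too.
func skipN(r io.Reader, n int64) error {
	got, err := io.CopyN(io.Discard, r, n)
	if got != n {
		return io.ErrUnexpectedEOF
	}
	return err
}

func main() {
	r := bytes.NewReader(make([]byte, 10))
	fmt.Println(skipN(r, 4))  // <nil>
	fmt.Println(skipN(r, 10)) // unexpected EOF: only 6 bytes remain
}
```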
+ Skippable bool + + // SkippableID is the user-specific ID for the skippable frame. + // Valid values are between 0 to 15, inclusive. + SkippableID int + + // SkippableSize is the length of the user data to skip following + // the header. + SkippableSize uint32 + + // HeaderSize is the raw size of the frame header. + // + // For normal frames, it includes the size of the magic number and + // the size of the header (per section 3.1.1.1). + // It does not include the size for any data blocks (section 3.1.1.2) nor + // the size for the trailing content checksum. + // + // For skippable frames, this counts the size of the magic number + // along with the size of the size field of the payload. + // It does not include the size of the skippable payload itself. + // The total frame size is the HeaderSize plus the SkippableSize. + HeaderSize int + // First block information. FirstBlock struct { // OK will be set if first block could be decoded. @@ -51,17 +83,9 @@ type Header struct { CompressedSize int } - // Skippable will be true if the frame is meant to be skipped. - // No other information will be populated. - Skippable bool - // If set there is a checksum present for the block content. + // The checksum field at the end is always 4 bytes long. HasCheckSum bool - - // If this is true FrameContentSize will have a valid value - HasFCS bool - - SingleSegment bool } // Decode the header from the beginning of the stream. @@ -71,39 +95,46 @@ type Header struct { // If there isn't enough input, io.ErrUnexpectedEOF is returned. // The FirstBlock.OK will indicate if enough information was available to decode the first block header. func (h *Header) Decode(in []byte) error { + *h = Header{} if len(in) < 4 { return io.ErrUnexpectedEOF } + h.HeaderSize += 4 b, in := in[:4], in[4:] - if !bytes.Equal(b, frameMagic) { - if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 { + if string(b) != frameMagic { + if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 { return ErrMagicMismatch } - *h = Header{Skippable: true} + if len(in) < 4 { + return io.ErrUnexpectedEOF + } + h.HeaderSize += 4 + h.Skippable = true + h.SkippableID = int(b[0] & 0xf) + h.SkippableSize = binary.LittleEndian.Uint32(in) return nil } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor if len(in) < 1 { return io.ErrUnexpectedEOF } - - // Clear output - *h = Header{} fhd, in := in[0], in[1:] + h.HeaderSize++ h.SingleSegment = fhd&(1<<5) != 0 h.HasCheckSum = fhd&(1<<2) != 0 - if fhd&(1<<3) != 0 { return errors.New("reserved bit set on frame header") } - // Read Window_Descriptor - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor if !h.SingleSegment { if len(in) < 1 { return io.ErrUnexpectedEOF } var wd byte wd, in = in[0], in[1:] + h.HeaderSize++ windowLog := 10 + (wd >> 3) windowBase := uint64(1) << windowLog windowAdd := (windowBase / 8) * uint64(wd&0x7) @@ -120,10 +151,8 @@ func (h *Header) Decode(in []byte) error { return io.ErrUnexpectedEOF } b, in = in[:size], in[size:] - if b == nil { - return io.ErrUnexpectedEOF - } - switch size { + h.HeaderSize += int(size) + switch len(b) { case 1: h.DictionaryID = uint32(b[0]) case 2: @@ -152,10 +181,8 @@ func (h *Header) Decode(in []byte) error { return io.ErrUnexpectedEOF } b, in = in[:fcsSize], in[fcsSize:] - if b == nil { - return io.ErrUnexpectedEOF - } - switch fcsSize { + h.HeaderSize += int(fcsSize) + switch len(b) { case 1: h.FrameContentSize = 
uint64(b[0]) case 2: diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index f593e464b..30459cd3f 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -5,9 +5,12 @@ package zstd import ( - "errors" + "context" + "encoding/binary" "io" "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" ) // Decoder provides decoding of zstandard streams. @@ -22,12 +25,20 @@ type Decoder struct { // Unreferenced decoders, ready for use. decoders chan *blockDec - // Streams ready to be decoded. - stream chan decodeStream - // Current read position used for Reader functionality. current decoderState + // sync stream decoding + syncStream struct { + decodedFrame uint64 + br readerWrapper + enabled bool + inFrame bool + dstBuf []byte + } + + frame *frameDec + // Custom dictionaries. // Always uses copies. dicts map[uint32]dict @@ -46,7 +57,10 @@ type decoderState struct { output chan decodeOutput // cancel remaining output. - cancel chan struct{} + cancel context.CancelFunc + + // crc of current frame + crc *xxhash.Digest flushed bool } @@ -81,7 +95,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { return nil, err } } - d.current.output = make(chan decodeOutput, d.o.concurrent) + d.current.crc = xxhash.New() d.current.flushed = true if r == nil { @@ -113,9 +127,6 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { // Returns the number of bytes written and any error that occurred. // When the stream is done, io.EOF will be returned. func (d *Decoder) Read(p []byte) (int, error) { - if d.stream == nil { - return 0, ErrDecoderNilInput - } var n int for { if len(d.current.b) > 0 { @@ -133,12 +144,12 @@ func (d *Decoder) Read(p []byte) (int, error) { break } if !d.nextBlock(n == 0) { - return n, nil + return n, d.current.err } } } if len(d.current.b) > 0 { - if debug { + if debugDecoder { println("returning", n, "still bytes left:", len(d.current.b)) } // Only return error at end of block @@ -147,7 +158,7 @@ func (d *Decoder) Read(p []byte) (int, error) { if d.current.err != nil { d.drainOutput() } - if debug { + if debugDecoder { println("returning", n, d.current.err, len(d.decoders)) } return n, d.current.err @@ -165,67 +176,80 @@ func (d *Decoder) Reset(r io.Reader) error { d.drainOutput() + d.syncStream.br.r = nil if r == nil { d.current.err = ErrDecoderNilInput + if len(d.current.b) > 0 { + d.current.b = d.current.b[:0] + } d.current.flushed = true return nil } - if d.stream == nil { - d.stream = make(chan decodeStream, 1) - d.streamWg.Add(1) - go d.startStreamDecoder(d.stream) - } - - // If bytes buffer and < 1MB, do sync decoding anyway. - if bb, ok := r.(byter); ok && bb.Len() < 1<<20 { + // If bytes buffer and < 5MB, do sync decoding anyway. 
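The skippable-frame path added to `Header.Decode` reads the user nibble from the first magic byte and a little-endian payload size. A standalone sketch of that layout; `parseSkippable` is a hypothetical helper:

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// parseSkippable checks the 8-byte header of a zstd skippable frame:
// magic 0x184D2A5? (the low nibble is the user ID), then a little-endian
// 32-bit payload size. Total frame size = 8 + size.
func parseSkippable(in []byte) (id int, size uint32, err error) {
	if len(in) < 8 {
		return 0, 0, errors.New("need 8 header bytes")
	}
	if in[0]&0xf0 != 0x50 || in[1] != 0x2a || in[2] != 0x4d || in[3] != 0x18 {
		return 0, 0, errors.New("not a skippable frame")
	}
	return int(in[0] & 0xf), binary.LittleEndian.Uint32(in[4:8]), nil
}

func main() {
	frame := []byte{0x53, 0x2a, 0x4d, 0x18, 0x10, 0x00, 0x00, 0x00}
	fmt.Println(parseSkippable(frame)) // id 3, size 16, <nil>
}
```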
+ if bb, ok := r.(byter); ok && bb.Len() < d.o.decodeBufsBelow && !d.o.limitToCap { bb2 := bb - if debug { + if debugDecoder { println("*bytes.Buffer detected, doing sync decode, len:", bb.Len()) } b := bb2.Bytes() var dst []byte - if cap(d.current.b) > 0 { - dst = d.current.b + if cap(d.syncStream.dstBuf) > 0 { + dst = d.syncStream.dstBuf[:0] } - dst, err := d.DecodeAll(b, dst[:0]) + dst, err := d.DecodeAll(b, dst) if err == nil { err = io.EOF } + // Save output buffer + d.syncStream.dstBuf = dst d.current.b = dst d.current.err = err d.current.flushed = true - if debug { + if debugDecoder { println("sync decode to", len(dst), "bytes, err:", err) } return nil } - // Remove current block. + d.stashDecoder() d.current.decodeOutput = decodeOutput{} d.current.err = nil - d.current.cancel = make(chan struct{}) d.current.flushed = false d.current.d = nil + d.syncStream.dstBuf = nil + + // Ensure no-one else is still running... + d.streamWg.Wait() + if d.frame == nil { + d.frame = newFrameDec(d.o) + } - d.stream <- decodeStream{ - r: r, - output: d.current.output, - cancel: d.current.cancel, + if d.o.concurrent == 1 { + return d.startSyncDecoder(r) } + + d.current.output = make(chan decodeOutput, d.o.concurrent) + ctx, cancel := context.WithCancel(context.Background()) + d.current.cancel = cancel + d.streamWg.Add(1) + go d.startStreamDecoder(ctx, r, d.current.output) + return nil } // drainOutput will drain the output until errEndOfStream is sent. func (d *Decoder) drainOutput() { if d.current.cancel != nil { - println("cancelling current") - close(d.current.cancel) + if debugDecoder { + println("cancelling current") + } + d.current.cancel() d.current.cancel = nil } if d.current.d != nil { - if debug { + if debugDecoder { printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders)) } d.decoders <- d.current.d @@ -238,34 +262,29 @@ func (d *Decoder) drainOutput() { } for v := range d.current.output { if v.d != nil { - if debug { + if debugDecoder { printf("re-adding decoder %p", v.d) } d.decoders <- v.d } - if v.err == errEndOfStream { - println("current flushed") - d.current.flushed = true - return - } } + d.current.output = nil + d.current.flushed = true } // WriteTo writes data to w until there's no more data to write or when an error occurs. // The return value n is the number of bytes written. // Any error encountered during the write is also returned. func (d *Decoder) WriteTo(w io.Writer) (int64, error) { - if d.stream == nil { - return 0, ErrDecoderNilInput - } var n int64 for { if len(d.current.b) > 0 { n2, err2 := w.Write(d.current.b) n += int64(n2) - if err2 != nil && d.current.err == nil { + if err2 != nil && (d.current.err == nil || d.current.err == io.EOF) { d.current.err = err2 - break + } else if n2 != len(d.current.b) { + d.current.err = io.ErrShortWrite } } if d.current.err != nil { @@ -289,19 +308,23 @@ func (d *Decoder) WriteTo(w io.Writer) (int64, error) { // DecodeAll can be used concurrently. // The Decoder concurrency limits will be respected. func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { - if d.current.err == ErrDecoderClosed { + if d.decoders == nil { return dst, ErrDecoderClosed } // Grab a block decoder and frame decoder. 
block := <-d.decoders frame := block.localFrame + initialSize := len(dst) defer func() { - if debug { + if debugDecoder { printf("re-adding decoder: %p", block) } frame.rawInput = nil frame.bBuf = nil + if frame.history.decoders.br != nil { + frame.history.decoders.br.in = nil + } d.decoders <- block }() frame.bBuf = input @@ -309,34 +332,52 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { for { frame.history.reset() err := frame.reset(&frame.bBuf) - if err == io.EOF { - if debug { - println("frame reset return EOF") + if err != nil { + if err == io.EOF { + if debugDecoder { + println("frame reset return EOF") + } + return dst, nil } - return dst, nil + return dst, err } if frame.DictionaryID != nil { dict, ok := d.dicts[*frame.DictionaryID] if !ok { return nil, ErrUnknownDictionary } + if debugDecoder { + println("setting dict", frame.DictionaryID) + } frame.history.setDict(&dict) } - if err != nil { - return dst, err - } - if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) { - return dst, ErrDecoderSizeExceeded + if frame.WindowSize > d.o.maxWindowSize { + if debugDecoder { + println("window size exceeded:", frame.WindowSize, ">", d.o.maxWindowSize) + } + return dst, ErrWindowSizeExceeded } - if frame.FrameContentSize > 0 && frame.FrameContentSize < 1<<30 { - // Never preallocate moe than 1 GB up front. + if frame.FrameContentSize != fcsUnknown { + if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)-initialSize) { + if debugDecoder { + println("decoder size exceeded; fcs:", frame.FrameContentSize, "> mcs:", d.o.maxDecodedSize-uint64(len(dst)-initialSize), "len:", len(dst)) + } + return dst, ErrDecoderSizeExceeded + } + if d.o.limitToCap && frame.FrameContentSize > uint64(cap(dst)-len(dst)) { + if debugDecoder { + println("decoder size exceeded; fcs:", frame.FrameContentSize, "> (cap-len)", cap(dst)-len(dst)) + } + return dst, ErrDecoderSizeExceeded + } if cap(dst)-len(dst) < int(frame.FrameContentSize) { - dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)) + dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc) copy(dst2, dst) dst = dst2 } } - if cap(dst) == 0 { + + if cap(dst) == 0 && !d.o.limitToCap { // Allocate len(input) * 2 by default if nothing is provided // and we didn't get frame content size. size := len(input) * 2 @@ -354,8 +395,11 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { if err != nil { return dst, err } + if uint64(len(dst)-initialSize) > d.o.maxDecodedSize { + return dst, ErrDecoderSizeExceeded + } if len(frame.bBuf) == 0 { - if debug { + if debugDecoder { println("frame dbuf empty") } break @@ -370,33 +414,178 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { // If non-blocking mode is used the returned boolean will be false // if no data was available without blocking. func (d *Decoder) nextBlock(blocking bool) (ok bool) { - if d.current.d != nil { - if debug { - printf("re-adding current decoder %p", d.current.d) - } - d.decoders <- d.current.d - d.current.d = nil - } if d.current.err != nil { // Keep error state. 
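The new `initialSize` and `limitToCap` accounting in `DecodeAll` pairs with the `WithDecodeAllCapLimit` option introduced later in this patch. A usage sketch; given the hunk above, decoding a frame larger than the spare capacity of `dst` should fail early with `ErrDecoderSizeExceeded` instead of growing the buffer:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, _ := zstd.NewWriter(nil)
	payload := enc.EncodeAll(make([]byte, 128<<10), nil) // frame with FCS = 128 KiB
	enc.Close()

	dec, err := zstd.NewReader(nil,
		zstd.WithDecoderMaxMemory(1<<20),
		zstd.WithDecodeAllCapLimit(true),
	)
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	// Spare capacity is only 64 KiB, so the 128 KiB frame is rejected up front.
	dst := make([]byte, 0, 64<<10)
	_, err = dec.DecodeAll(payload, dst)
	fmt.Println(err)
}
```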
- return blocking + return false } + d.current.b = d.current.b[:0] + // SYNC: + if d.syncStream.enabled { + if !blocking { + return false + } + ok = d.nextBlockSync() + if !ok { + d.stashDecoder() + } + return ok + } + + //ASYNC: + d.stashDecoder() if blocking { - d.current.decodeOutput = <-d.current.output + d.current.decodeOutput, ok = <-d.current.output } else { select { - case d.current.decodeOutput = <-d.current.output: + case d.current.decodeOutput, ok = <-d.current.output: default: return false } } - if debug { - println("got", len(d.current.b), "bytes, error:", d.current.err) + if !ok { + // This should not happen, so signal error state... + d.current.err = io.ErrUnexpectedEOF + return false + } + next := d.current.decodeOutput + if next.d != nil && next.d.async.newHist != nil { + d.current.crc.Reset() + } + if debugDecoder { + var tmp [4]byte + binary.LittleEndian.PutUint32(tmp[:], uint32(xxhash.Sum64(next.b))) + println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp) + } + + if d.o.ignoreChecksum { + return true + } + + if len(next.b) > 0 { + n, err := d.current.crc.Write(next.b) + if err == nil { + if n != len(next.b) { + d.current.err = io.ErrShortWrite + } + } + } + if next.err == nil && next.d != nil && next.d.hasCRC { + got := uint32(d.current.crc.Sum64()) + if got != next.d.checkCRC { + if debugDecoder { + printf("CRC Check Failed: %08x (got) != %08x (on stream)\n", got, next.d.checkCRC) + } + d.current.err = ErrCRCMismatch + } else { + if debugDecoder { + printf("CRC ok %08x\n", got) + } + } + } + + return true +} + +func (d *Decoder) nextBlockSync() (ok bool) { + if d.current.d == nil { + d.current.d = <-d.decoders + } + for len(d.current.b) == 0 { + if !d.syncStream.inFrame { + d.frame.history.reset() + d.current.err = d.frame.reset(&d.syncStream.br) + if d.current.err != nil { + return false + } + if d.frame.DictionaryID != nil { + dict, ok := d.dicts[*d.frame.DictionaryID] + if !ok { + d.current.err = ErrUnknownDictionary + return false + } else { + d.frame.history.setDict(&dict) + } + } + if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize { + d.current.err = ErrDecoderSizeExceeded + return false + } + + d.syncStream.decodedFrame = 0 + d.syncStream.inFrame = true + } + d.current.err = d.frame.next(d.current.d) + if d.current.err != nil { + return false + } + d.frame.history.ensureBlock() + if debugDecoder { + println("History trimmed:", len(d.frame.history.b), "decoded already:", d.syncStream.decodedFrame) + } + histBefore := len(d.frame.history.b) + d.current.err = d.current.d.decodeBuf(&d.frame.history) + + if d.current.err != nil { + println("error after:", d.current.err) + return false + } + d.current.b = d.frame.history.b[histBefore:] + if debugDecoder { + println("history after:", len(d.frame.history.b)) + } + + // Check frame size (before CRC) + d.syncStream.decodedFrame += uint64(len(d.current.b)) + if d.syncStream.decodedFrame > d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeExceeded + return false + } + + // Check FCS + if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeMismatch + return false + } + + // Update/Check CRC + if 
d.frame.HasCheckSum { + if !d.o.ignoreChecksum { + d.frame.crc.Write(d.current.b) + } + if d.current.d.Last { + if !d.o.ignoreChecksum { + d.current.err = d.frame.checkCRC() + } else { + d.current.err = d.frame.consumeCRC() + } + if d.current.err != nil { + println("CRC error:", d.current.err) + return false + } + } + } + d.syncStream.inFrame = !d.current.d.Last } return true } +func (d *Decoder) stashDecoder() { + if d.current.d != nil { + if debugDecoder { + printf("re-adding current decoder %p", d.current.d) + } + d.decoders <- d.current.d + d.current.d = nil + } +} + // Close will release all resources. // It is NOT possible to reuse the decoder after this. func (d *Decoder) Close() { @@ -404,10 +593,10 @@ func (d *Decoder) Close() { return } d.drainOutput() - if d.stream != nil { - close(d.stream) + if d.current.cancel != nil { + d.current.cancel() d.streamWg.Wait() - d.stream = nil + d.current.cancel = nil } if d.decoders != nil { close(d.decoders) @@ -458,100 +647,309 @@ type decodeOutput struct { err error } -type decodeStream struct { - r io.Reader - - // Blocks ready to be written to output. - output chan decodeOutput - - // cancel reading from the input - cancel chan struct{} +func (d *Decoder) startSyncDecoder(r io.Reader) error { + d.frame.history.reset() + d.syncStream.br = readerWrapper{r: r} + d.syncStream.inFrame = false + d.syncStream.enabled = true + d.syncStream.decodedFrame = 0 + return nil } -// errEndOfStream indicates that everything from the stream was read. -var errEndOfStream = errors.New("end-of-stream") - // Create Decoder: -// Spawn n block decoders. These accept tasks to decode a block. -// Create goroutine that handles stream processing, this will send history to decoders as they are available. -// Decoders update the history as they decode. -// When a block is returned: -// a) history is sent to the next decoder, -// b) content written to CRC. -// c) return data to WRITER. -// d) wait for next block to return data. -// Once WRITTEN, the decoders reused by the writer frame decoder for re-use. -func (d *Decoder) startStreamDecoder(inStream chan decodeStream) { +// ASYNC: +// Spawn 3 go routines. +// 0: Read frames and decode block literals. +// 1: Decode sequences. +// 2: Execute sequences, send to output. +func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) { defer d.streamWg.Done() - frame := newFrameDec(d.o) - for stream := range inStream { - if debug { - println("got new stream") + br := readerWrapper{r: r} + + var seqDecode = make(chan *blockDec, d.o.concurrent) + var seqExecute = make(chan *blockDec, d.o.concurrent) + + // Async 1: Decode sequences... 
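The CRC handling above is the frame content checksum from the zstd format: the low 32 bits of the XXH64 of all decoded bytes, stored little-endian after the last block. A sketch using github.com/cespare/xxhash/v2 as a stand-in for the vendored internal fork, which implements the same algorithm:

```go
package main

import (
	"encoding/binary"
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// Streaming digest, fed block by block as nextBlock does.
	d := xxhash.New()
	d.Write([]byte("hello "))
	d.Write([]byte("world"))
	got := uint32(d.Sum64()) // low 32 bits of XXH64

	// The frame stores the same value little-endian after the last block.
	var stored [4]byte
	binary.LittleEndian.PutUint32(stored[:], uint32(xxhash.Sum64([]byte("hello world"))))

	fmt.Printf("computed %08x, stored %08x\n", got, binary.LittleEndian.Uint32(stored[:]))
}
```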
+ go func() { + var hist history + var hasErr bool + + for block := range seqDecode { + if hasErr { + if block != nil { + seqExecute <- block + } + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 1: new history, recent:", block.async.newHist.recentOffsets) + } + hist.reset() + hist.decoders = block.async.newHist.decoders + hist.recentOffsets = block.async.newHist.recentOffsets + hist.windowSize = block.async.newHist.windowSize + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqExecute <- block + continue + } + + hist.decoders.literals = block.async.literals + block.err = block.prepareSequences(block.async.seqData, &hist) + if debugDecoder && block.err != nil { + println("prepareSequences returned:", block.err) + } + hasErr = block.err != nil + if block.err == nil { + block.err = block.decodeSequences(&hist) + if debugDecoder && block.err != nil { + println("decodeSequences returned:", block.err) + } + hasErr = block.err != nil + // block.async.sequence = hist.decoders.seq[:hist.decoders.nSeqs] + block.async.seqSize = hist.decoders.seqSize + } + seqExecute <- block } - br := readerWrapper{r: stream.r} - decodeStream: - for { - frame.history.reset() - err := frame.reset(&br) - if debug && err != nil { - println("Frame decoder returned", err) + close(seqExecute) + hist.reset() + }() + + var wg sync.WaitGroup + wg.Add(1) + + // Async 3: Execute sequences... + frameHistCache := d.frame.history.b + go func() { + var hist history + var decodedFrame uint64 + var fcs uint64 + var hasErr bool + for block := range seqExecute { + out := decodeOutput{err: block.err, d: block} + if block.err != nil || hasErr { + hasErr = true + output <- out + continue } - if err == nil && frame.DictionaryID != nil { - dict, ok := d.dicts[*frame.DictionaryID] - if !ok { - err = ErrUnknownDictionary + if block.async.newHist != nil { + if debugDecoder { + println("Async 2: new history") + } + hist.reset() + hist.windowSize = block.async.newHist.windowSize + hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + + if cap(hist.b) < hist.allocFrameBuffer { + if cap(frameHistCache) >= hist.allocFrameBuffer { + hist.b = frameHistCache + } else { + hist.b = make([]byte, 0, hist.allocFrameBuffer) + println("Alloc history sized", hist.allocFrameBuffer) + } + } + hist.b = hist.b[:0] + fcs = block.async.fcs + decodedFrame = 0 + } + do := decodeOutput{err: block.err, d: block} + switch block.Type { + case blockTypeRLE: + if debugDecoder { + println("add rle block length:", block.RLESize) + } + + if cap(block.dst) < int(block.RLESize) { + if block.lowMem { + block.dst = make([]byte, block.RLESize) + } else { + block.dst = make([]byte, maxCompressedBlockSize) + } + } + block.dst = block.dst[:block.RLESize] + v := block.data[0] + for i := range block.dst { + block.dst[i] = v + } + hist.append(block.dst) + do.b = block.dst + case blockTypeRaw: + if debugDecoder { + println("add raw block length:", len(block.data)) + } + hist.append(block.data) + do.b = block.data + case blockTypeCompressed: + if debugDecoder { + println("execute with history length:", len(hist.b), "window:", hist.windowSize) + } + hist.decoders.seqSize = block.async.seqSize + hist.decoders.literals = block.async.literals + do.err = block.executeSequences(&hist) + hasErr = do.err != nil + if debugDecoder && hasErr { 
+ println("executeSequences returned:", do.err) + } + do.b = block.dst + } + if !hasErr { + decodedFrame += uint64(len(do.b)) + if decodedFrame > fcs { + println("fcs exceeded", block.Last, fcs, decodedFrame) + do.err = ErrFrameSizeExceeded + hasErr = true + } else if block.Last && fcs != fcsUnknown && decodedFrame != fcs { + do.err = ErrFrameSizeMismatch + hasErr = true } else { - frame.history.setDict(&dict) + if debugDecoder { + println("fcs ok", block.Last, fcs, decodedFrame) + } } } - if err != nil { - stream.output <- decodeOutput{ - err: err, + output <- do + } + close(output) + frameHistCache = hist.b + wg.Done() + if debugDecoder { + println("decoder goroutines finished") + } + hist.reset() + }() + + var hist history +decodeStream: + for { + var hasErr bool + hist.reset() + decodeBlock := func(block *blockDec) { + if hasErr { + if block != nil { + seqDecode <- block } - break + return } - if debug { - println("starting frame decoder") + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqDecode <- block + return } - // This goroutine will forward history between frames. - frame.frameDone.Add(1) - frame.initAsync() + remain, err := block.decodeLiterals(block.data, &hist) + block.err = err + hasErr = block.err != nil + if err == nil { + block.async.literals = hist.decoders.literals + block.async.seqData = remain + } else if debugDecoder { + println("decodeLiterals error:", err) + } + seqDecode <- block + } + frame := d.frame + if debugDecoder { + println("New frame...") + } + var historySent bool + frame.history.reset() + err := frame.reset(&br) + if debugDecoder && err != nil { + println("Frame decoder returned", err) + } + if err == nil && frame.DictionaryID != nil { + dict, ok := d.dicts[*frame.DictionaryID] + if !ok { + err = ErrUnknownDictionary + } else { + frame.history.setDict(&dict) + } + } + if err == nil && d.frame.WindowSize > d.o.maxWindowSize { + if debugDecoder { + println("decoder size exceeded, fws:", d.frame.WindowSize, "> mws:", d.o.maxWindowSize) + } - go frame.startDecoder(stream.output) - decodeFrame: - // Go through all blocks of the frame. - for { - dec := <-d.decoders - select { - case <-stream.cancel: - if !frame.sendErr(dec, io.EOF) { - // To not let the decoder dangle, send it back. - stream.output <- decodeOutput{d: dec} - } - break decodeStream - default: + err = ErrDecoderSizeExceeded + } + if err != nil { + select { + case <-ctx.Done(): + case dec := <-d.decoders: + dec.sendErr(err) + decodeBlock(dec) + } + break decodeStream + } + + // Go through all blocks of the frame. + for { + var dec *blockDec + select { + case <-ctx.Done(): + break decodeStream + case dec = <-d.decoders: + // Once we have a decoder, we MUST return it. 
+ } + err := frame.next(dec) + if !historySent { + h := frame.history + if debugDecoder { + println("Alloc History:", h.allocFrameBuffer) } - err := frame.next(dec) - switch err { - case io.EOF: - // End of current frame, no error - println("EOF on next block") - break decodeFrame - case nil: - continue - default: - println("block decoder returned", err) - break decodeStream + hist.reset() + if h.dict != nil { + hist.setDict(h.dict) } + dec.async.newHist = &h + dec.async.fcs = frame.FrameContentSize + historySent = true + } else { + dec.async.newHist = nil + } + if debugDecoder && err != nil { + println("next block returned error:", err) + } + dec.err = err + dec.hasCRC = false + if dec.Last && frame.HasCheckSum && err == nil { + crc, err := frame.rawInput.readSmall(4) + if len(crc) < 4 { + if err == nil { + err = io.ErrUnexpectedEOF + + } + println("CRC missing?", err) + dec.err = err + } else { + dec.checkCRC = binary.LittleEndian.Uint32(crc) + dec.hasCRC = true + if debugDecoder { + printf("found crc to check: %08x\n", dec.checkCRC) + } + } + } + err = dec.err + last := dec.Last + decodeBlock(dec) + if err != nil { + break decodeStream + } + if last { + break } - // All blocks have started decoding, check if there are more frames. - println("waiting for done") - frame.frameDone.Wait() - println("done waiting...") } - frame.frameDone.Wait() - println("Sending EOS") - stream.output <- decodeOutput{err: errEndOfStream} } + close(seqDecode) + wg.Wait() + hist.reset() + d.frame.history.b = frameHistCache } diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go index c0fd058c2..f42448e69 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -14,19 +14,28 @@ type DOption func(*decoderOptions) error // options retains accumulated state of multiple options. type decoderOptions struct { - lowMem bool - concurrent int - maxDecodedSize uint64 - dicts []dict + lowMem bool + concurrent int + maxDecodedSize uint64 + maxWindowSize uint64 + dicts []dict + ignoreChecksum bool + limitToCap bool + decodeBufsBelow int } func (o *decoderOptions) setDefault() { *o = decoderOptions{ // use less ram: true for now, but may change. - lowMem: true, - concurrent: runtime.GOMAXPROCS(0), + lowMem: true, + concurrent: runtime.GOMAXPROCS(0), + maxWindowSize: MaxWindowSize, + decodeBufsBelow: 128 << 10, } - o.maxDecodedSize = 1 << 63 + if o.concurrent > 4 { + o.concurrent = 4 + } + o.maxDecodedSize = 64 << 30 } // WithDecoderLowmem will set whether to use a lower amount of memory, @@ -35,16 +44,25 @@ func WithDecoderLowmem(b bool) DOption { return func(o *decoderOptions) error { o.lowMem = b; return nil } } -// WithDecoderConcurrency will set the concurrency, -// meaning the maximum number of decoders to run concurrently. -// The value supplied must be at least 1. -// By default this will be set to GOMAXPROCS. +// WithDecoderConcurrency sets the number of created decoders. +// When decoding block with DecodeAll, this will limit the number +// of possible concurrently running decodes. +// When decoding streams, this will limit the number of +// inflight blocks. +// When decoding streams and setting maximum to 1, +// no async decoding will be done. +// When a value of 0 is provided GOMAXPROCS will be used. +// By default this will be set to 4 or GOMAXPROCS, whatever is lower. 
func WithDecoderConcurrency(n int) DOption { return func(o *decoderOptions) error { - if n <= 0 { + if n < 0 { return errors.New("concurrency must be at least 1") } - o.concurrent = n + if n == 0 { + o.concurrent = runtime.GOMAXPROCS(0) + } else { + o.concurrent = n + } return nil } } @@ -52,8 +70,7 @@ func WithDecoderConcurrency(n int) DOption { // WithDecoderMaxMemory allows to set a maximum decoded size for in-memory // non-streaming operations or maximum window size for streaming operations. // This can be used to control memory usage of potentially hostile content. -// For streaming operations, the maximum window size is capped at 1<<30 bytes. -// Maximum and default is 1 << 63 bytes. +// Maximum is 1 << 63 bytes. Default is 64GiB. func WithDecoderMaxMemory(n uint64) DOption { return func(o *decoderOptions) error { if n == 0 { @@ -81,3 +98,52 @@ func WithDecoderDicts(dicts ...[]byte) DOption { return nil } } + +// WithDecoderMaxWindow allows to set a maximum window size for decodes. +// This allows rejecting packets that will cause big memory usage. +// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting. +// If WithDecoderMaxMemory is set to a lower value, that will be used. +// Default is 512MB, Maximum is ~3.75 TB as per zstandard spec. +func WithDecoderMaxWindow(size uint64) DOption { + return func(o *decoderOptions) error { + if size < MinWindowSize { + return errors.New("WithMaxWindowSize must be at least 1KB, 1024 bytes") + } + if size > (1<<41)+7*(1<<38) { + return errors.New("WithMaxWindowSize must be less than (1<<41) + 7*(1<<38) ~ 3.75TB") + } + o.maxWindowSize = size + return nil + } +} + +// WithDecodeAllCapLimit will limit DecodeAll to decoding cap(dst)-len(dst) bytes, +// or any size set in WithDecoderMaxMemory. +// This can be used to limit decoding to a specific maximum output size. +// Disabled by default. +func WithDecodeAllCapLimit(b bool) DOption { + return func(o *decoderOptions) error { + o.limitToCap = b + return nil + } +} + +// WithDecodeBuffersBelow will fully decode readers that have a +// `Bytes() []byte` and `Len() int` interface similar to bytes.Buffer. +// This typically uses less allocations but will have the full decompressed object in memory. +// Note that DecodeAllCapLimit will disable this, as well as giving a size of 0 or less. +// Default is 128KiB. +func WithDecodeBuffersBelow(size int) DOption { + return func(o *decoderOptions) error { + o.decodeBufsBelow = size + return nil + } +} + +// IgnoreChecksum allows to forcibly ignore checksum checking. +func IgnoreChecksum(b bool) DOption { + return func(o *decoderOptions) error { + o.ignoreChecksum = b + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go index fa25a18d8..b2725f77b 100644 --- a/vendor/github.com/klauspost/compress/zstd/dict.go +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -1,7 +1,6 @@ package zstd import ( - "bytes" "encoding/binary" "errors" "fmt" @@ -20,7 +19,7 @@ type dict struct { content []byte } -var dictMagic = [4]byte{0x37, 0xa4, 0x30, 0xec} +const dictMagic = "\x37\xa4\x30\xec" // ID returns the dictionary id or 0 if d is nil. 
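Taken together, the option changes above reshape how a decoder is tuned. A usage sketch combining options added or redefined in this patch; values are illustrative:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	dec, err := zstd.NewReader(bytes.NewReader(nil),
		zstd.WithDecoderConcurrency(1),     // 1 disables async streaming; 0 means GOMAXPROCS
		zstd.WithDecoderMaxWindow(8<<20),   // reject frames demanding > 8 MiB windows
		zstd.WithDecoderMaxMemory(256<<20), // cap on total decoded size
	)
	if err != nil {
		panic(err)
	}
	defer dec.Close()
	fmt.Println("decoder configured")
}
```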
func (d *dict) ID() uint32 { @@ -50,7 +49,7 @@ func loadDict(b []byte) (*dict, error) { ofDec: sequenceDec{fse: &fseDecoder{}}, mlDec: sequenceDec{fse: &fseDecoder{}}, } - if !bytes.Equal(b[:4], dictMagic[:]) { + if string(b[:4]) != dictMagic { return nil, ErrMagicMismatch } d.id = binary.LittleEndian.Uint32(b[4:8]) @@ -82,7 +81,7 @@ func loadDict(b []byte) (*dict, error) { println("Transform table error:", err) return err } - if debug { + if debugDecoder || debugEncoder { println("Read table ok", "symbolLen:", dec.symbolLen) } // Set decoders as predefined so they aren't reused. diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go index 60f298648..bfb2e146c 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_base.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -16,6 +16,7 @@ type fastBase struct { cur int32 // maximum offset. Should be at least 2x block size. maxMatchOff int32 + bufferReset int32 hist []byte crc *xxhash.Digest tmp [8]byte @@ -38,8 +39,8 @@ func (e *fastBase) AppendCRC(dst []byte) []byte { // WindowSize returns the window size of the encoder, // or a window size small enough to contain the input size, if > 0. -func (e *fastBase) WindowSize(size int) int32 { - if size > 0 && size < int(e.maxMatchOff) { +func (e *fastBase) WindowSize(size int64) int32 { + if size > 0 && size < int64(e.maxMatchOff) { b := int32(1) << uint(bits.Len(uint(size))) // Keep minimum window. if b < 1024 { @@ -56,8 +57,8 @@ func (e *fastBase) Block() *blockEnc { } func (e *fastBase) addBlock(src []byte) int32 { - if debugAsserts && e.cur > bufferReset { - panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, bufferReset)) + if debugAsserts && e.cur > e.bufferReset { + panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, e.bufferReset)) } // check if we have space already if len(e.hist)+len(src) > cap(e.hist) { @@ -108,11 +109,6 @@ func (e *fastBase) UseBlock(enc *blockEnc) { e.blk = enc } -func (e *fastBase) matchlenNoHist(s, t int32, src []byte) int32 { - // Extend the match to be as long as possible. - return int32(matchLen(src[s:], src[t:])) -} - func (e *fastBase) matchlen(s, t int32, src []byte) int32 { if debugAsserts { if s < 0 { @@ -131,8 +127,6 @@ func (e *fastBase) matchlen(s, t int32, src []byte) int32 { panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize)) } } - - // Extend the match to be as long as possible. return int32(matchLen(src[s:], src[t:])) } @@ -161,7 +155,7 @@ func (e *fastBase) resetBase(d *dict, singleBlock bool) { // We offset current position so everything will be out of reach. // If above reset line, history will be purged. 
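`WindowSize` above now takes an `int64` and, for small known inputs, advertises the next power of two (floored at 1 KiB) instead of the encoder's full window, shrinking the frame's window requirement. A sketch mirroring that hunk; `windowSize` is a free-function restatement, not the method itself:

```go
package main

import (
	"fmt"
	"math/bits"
)

// windowSize mirrors fastBase.WindowSize after the patch: for a small
// known input size, use the next power of two, but never below 1 KiB.
func windowSize(size int64, maxMatchOff int32) int32 {
	if size > 0 && size < int64(maxMatchOff) {
		b := int32(1) << uint(bits.Len(uint(size)))
		if b < 1024 {
			b = 1024 // keep minimum window
		}
		return b
	}
	return maxMatchOff
}

func main() {
	fmt.Println(windowSize(3000, 1<<21)) // 4096
	fmt.Println(windowSize(100, 1<<21))  // 1024
	fmt.Println(windowSize(0, 1<<21))    // full window: size unknown
}
```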
- if e.cur < bufferReset { + if e.cur < e.bufferReset { e.cur += e.maxMatchOff + int32(len(e.hist)) } e.hist = e.hist[:0] diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go index dc1eed5f0..830f5ba74 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -5,22 +5,62 @@ package zstd import ( + "bytes" "fmt" - "math/bits" + + "github.com/klauspost/compress" ) const ( - bestLongTableBits = 20 // Bits used in the long match table + bestLongTableBits = 22 // Bits used in the long match table bestLongTableSize = 1 << bestLongTableBits // Size of the table + bestLongLen = 8 // Bytes used for table hash // Note: Increasing the short table bits or making the hash shorter // can actually lead to compression degradation since it will 'steal' more from the // long match table and match offsets are quite big. // This greatly depends on the type of input. - bestShortTableBits = 16 // Bits used in the short match table + bestShortTableBits = 18 // Bits used in the short match table bestShortTableSize = 1 << bestShortTableBits // Size of the table + bestShortLen = 4 // Bytes used for table hash + ) +type match struct { + offset int32 + s int32 + length int32 + rep int32 + est int32 + _ [12]byte // Aligned size to cache line: 4+4+4+4+4 bytes + 12 bytes padding = 32 bytes +} + +const highScore = 25000 + +// estBits will estimate output bits from predefined tables. +func (m *match) estBits(bitsPerByte int32) { + mlc := mlCode(uint32(m.length - zstdMinMatch)) + var ofc uint8 + if m.rep < 0 { + ofc = ofCode(uint32(m.s-m.offset) + 3) + } else { + ofc = ofCode(uint32(m.rep)) + } + // Cost, excluding + ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc] + + // Add cost of match encoding... + m.est = int32(ofTT.outBits + mlTT.outBits) + m.est += int32(ofTT.deltaNbBits>>16 + mlTT.deltaNbBits>>16) + // Subtract savings compared to literal encoding... + m.est -= (m.length * bitsPerByte) >> 10 + if m.est > 0 { + // Unlikely gain.. + m.length = 0 + m.est = highScore + } +} + // bestFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. // The long match table contains the previous entry with the same hash, // effectively making it a "chain" of length 2. @@ -45,14 +85,10 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { ) // Protect against e.cur wraparound. - for e.cur >= bufferReset { + for e.cur >= e.bufferReset-int32(len(e.hist)) { if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = prevEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = prevEntry{} - } + e.table = [bestShortTableSize]prevEntry{} + e.longTable = [bestLongTableSize]prevEntry{} e.cur = e.maxMatchOff break } @@ -109,6 +145,14 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { return } + // Use this to estimate literal cost. + // Scaled by 10 bits. 
+ bitsPerByte := int32((compress.ShannonEntropyBits(src) * 1024) / len(src)) + // Huffman can never go < 1 bit/byte + if bitsPerByte < 1024 { + bitsPerByte = 1024 + } + // Override src src = e.hist sLimit := int32(len(src)) - inputMargin @@ -132,7 +176,7 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { } _ = addLiterals - if debug { + if debugEncoder { println("recent offsets:", blk.recentOffsets) } @@ -145,51 +189,53 @@ encodeLoop: panic("offset0 was 0") } - type match struct { - offset int32 - s int32 - length int32 - rep int32 - } - matchAt := func(offset int32, s int32, first uint32, rep int32) match { - if s-offset >= e.maxMatchOff || load3232(src, offset) != first { - return match{offset: offset, s: s} - } - return match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep} - } - - bestOf := func(a, b match) match { - aScore := b.s - a.s + a.length - bScore := a.s - b.s + b.length - if a.rep < 0 { - aScore = aScore - int32(bits.Len32(uint32(a.offset)))/8 - } - if b.rep < 0 { - bScore = bScore - int32(bits.Len32(uint32(b.offset)))/8 - } - if aScore >= bScore { + bestOf := func(a, b *match) *match { + if a.est-b.est+(a.s-b.s)*bitsPerByte>>10 < 0 { return a } return b } const goodEnough = 100 - nextHashL := hash8(cv, bestLongTableBits) - nextHashS := hash4x64(cv, bestShortTableBits) + nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) + nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] - best := bestOf(matchAt(candidateL.offset-e.cur, s, uint32(cv), -1), matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) - best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) - best = bestOf(best, matchAt(candidateS.prev-e.cur, s, uint32(cv), -1)) + matchAt := func(offset int32, s int32, first uint32, rep int32) match { + if s-offset >= e.maxMatchOff || load3232(src, offset) != first { + return match{s: s, est: highScore} + } + if debugAsserts { + if !bytes.Equal(src[s:s+4], src[offset:offset+4]) { + panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) + } + } + m := match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep} + m.estBits(bitsPerByte) + return m + } + + m1 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1) + m2 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1) + m3 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1) + m4 := matchAt(candidateS.prev-e.cur, s, uint32(cv), -1) + best := bestOf(bestOf(&m1, &m2), bestOf(&m3, &m4)) + if canRepeat && best.length < goodEnough { - best = bestOf(best, matchAt(s-offset1+1, s+1, uint32(cv>>8), 1)) - best = bestOf(best, matchAt(s-offset2+1, s+1, uint32(cv>>8), 2)) - best = bestOf(best, matchAt(s-offset3+1, s+1, uint32(cv>>8), 3)) + cv32 := uint32(cv >> 8) + spp := s + 1 + m1 := matchAt(spp-offset1, spp, cv32, 1) + m2 := matchAt(spp-offset2, spp, cv32, 2) + m3 := matchAt(spp-offset3, spp, cv32, 3) + best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3)) if best.length > 0 { - best = bestOf(best, matchAt(s-offset1+3, s+3, uint32(cv>>24), 1)) - best = bestOf(best, matchAt(s-offset2+3, s+3, uint32(cv>>24), 2)) - best = bestOf(best, matchAt(s-offset3+3, s+3, uint32(cv>>24), 3)) + cv32 = uint32(cv >> 24) + spp += 2 + m1 := matchAt(spp-offset1, spp, cv32, 1) + m2 := matchAt(spp-offset2, spp, cv32, 2) + m3 := matchAt(spp-offset3, spp, cv32, 3) + best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3)) } } // Load next and check... 
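The new cost model above replaces the older length-and-distance heuristic with fixed-point bit accounting: literal cost is the Shannon entropy of the block scaled by 1024 (10 fractional bits) and floored at 1 bit per byte, and a candidate match wins only when its FSE-coded cost undercuts the literal bits it would replace. A sketch of just that bookkeeping, assuming compress.ShannonEntropyBits (which this file imports) and treating the matches' coded costs as given:

	package zstdexample

	import "github.com/klauspost/compress"

	// literalCost estimates the cost of one literal byte, in bits
	// scaled by 1024 and floored at 1 bit/byte, as the patch does.
	func literalCost(src []byte) int32 {
		bitsPerByte := int32((compress.ShannonEntropyBits(src) * 1024) / len(src))
		if bitsPerByte < 1024 { // Huffman cannot go below 1 bit/byte
			bitsPerByte = 1024
		}
		return bitsPerByte
	}

	// better mirrors the bestOf comparison: match A beats match B when
	// its coded cost minus the literal bits it saves is lower.
	func better(estA, sA, estB, sB, bitsPerByte int32) bool {
		return estA-estB+(sA-sB)*bitsPerByte>>10 < 0
	}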
@@ -209,33 +255,52 @@ encodeLoop: } s++ - candidateS = e.table[hash4x64(cv>>8, bestShortTableBits)] + candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)] cv = load6432(src, s) cv2 := load6432(src, s+1) - candidateL = e.longTable[hash8(cv, bestLongTableBits)] - candidateL2 := e.longTable[hash8(cv2, bestLongTableBits)] - - best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) - best = bestOf(best, matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)) - best = bestOf(best, matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) - best = bestOf(best, matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1)) - best = bestOf(best, matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1)) - + candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)] + candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)] + + // Short at s+1 + m1 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1) + // Long at s+1, s+2 + m2 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1) + m3 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1) + m4 := matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1) + m5 := matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1) + best = bestOf(bestOf(bestOf(best, &m1), &m2), bestOf(bestOf(&m3, &m4), &m5)) + if false { + // Short at s+3. + // Too often worse... + m := matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1) + best = bestOf(best, &m) + } // See if we can find a better match by checking where the current best ends. // Use that offset to see if we can find a better full match. if sAt := best.s + best.length; sAt < sLimit { - nextHashL := hash8(load6432(src, sAt), bestLongTableBits) + nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) candidateEnd := e.longTable[nextHashL] - if pos := candidateEnd.offset - e.cur - best.length; pos >= 0 { - bestEnd := bestOf(best, matchAt(pos, best.s, load3232(src, best.s), -1)) - if pos := candidateEnd.prev - e.cur - best.length; pos >= 0 { - bestEnd = bestOf(bestEnd, matchAt(pos, best.s, load3232(src, best.s), -1)) + // Start check at a fixed offset to allow for a few mismatches. + // For this compression level 2 yields the best results. 
+ const skipBeginning = 2 + if pos := candidateEnd.offset - e.cur - best.length + skipBeginning; pos >= 0 { + m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + bestEnd := bestOf(best, &m) + if pos := candidateEnd.prev - e.cur - best.length + skipBeginning; pos >= 0 { + m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + bestEnd = bestOf(bestEnd, &m) } best = bestEnd } } } + if debugAsserts { + if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) { + panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length])) + } + } + // We have a match, we can store the forward value if best.rep > 0 { s = best.s @@ -274,7 +339,7 @@ encodeLoop: nextEmit = s if s >= sLimit { - if debug { + if debugEncoder { println("repeat ended", s, best.length) } @@ -284,8 +349,8 @@ encodeLoop: off := index0 + e.cur for index0 < s-1 { cv0 := load6432(src, index0) - h0 := hash8(cv0, bestLongTableBits) - h1 := hash4x64(cv0, bestShortTableBits) + h0 := hashLen(cv0, bestLongTableBits, bestLongLen) + h1 := hashLen(cv0, bestShortTableBits, bestShortLen) e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} off++ @@ -311,7 +376,7 @@ encodeLoop: panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) } - if debugAsserts && canRepeat && int(offset1) > len(src) { + if debugAsserts && int(offset1) > len(src) { panic("invalid offset") } @@ -352,8 +417,8 @@ encodeLoop: // every entry for index0 < s-1 { cv0 := load6432(src, index0) - h0 := hash8(cv0, bestLongTableBits) - h1 := hash4x64(cv0, bestShortTableBits) + h0 := hashLen(cv0, bestLongTableBits, bestLongLen) + h1 := hashLen(cv0, bestShortTableBits, bestShortLen) off := index0 + e.cur e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} @@ -374,8 +439,8 @@ encodeLoop: } // Store this, since we have it. - nextHashS := hash4x64(cv, bestShortTableBits) - nextHashL := hash8(cv, bestLongTableBits) + nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) + nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) // We have at least 4 byte match. // No need to check backwards. 
We come straight from a match @@ -412,7 +477,7 @@ encodeLoop: blk.recentOffsets[0] = uint32(offset1) blk.recentOffsets[1] = uint32(offset2) blk.recentOffsets[2] = uint32(offset3) - if debug { + if debugEncoder { println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) } } @@ -425,7 +490,7 @@ func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { e.Encode(blk, src) } -// ResetDict will reset and set a dictionary if not nil +// Reset will reset and set a dictionary if not nil func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { e.resetBase(d, singleBlock) if d == nil { @@ -441,10 +506,10 @@ func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { const hashLog = bestShortTableBits cv := load6432(d.content, i-e.maxMatchOff) - nextHash := hash4x64(cv, hashLog) // 0 -> 4 - nextHash1 := hash4x64(cv>>8, hashLog) // 1 -> 5 - nextHash2 := hash4x64(cv>>16, hashLog) // 2 -> 6 - nextHash3 := hash4x64(cv>>24, hashLog) // 3 -> 7 + nextHash := hashLen(cv, hashLog, bestShortLen) // 0 -> 4 + nextHash1 := hashLen(cv>>8, hashLog, bestShortLen) // 1 -> 5 + nextHash2 := hashLen(cv>>16, hashLog, bestShortLen) // 2 -> 6 + nextHash3 := hashLen(cv>>24, hashLog, bestShortLen) // 3 -> 7 e.dictTable[nextHash] = prevEntry{ prev: e.dictTable[nextHash].offset, offset: i, @@ -472,7 +537,7 @@ func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { } if len(d.content) >= 8 { cv := load6432(d.content, 0) - h := hash8(cv, bestLongTableBits) + h := hashLen(cv, bestLongTableBits, bestLongLen) e.dictLongTable[h] = prevEntry{ offset: e.maxMatchOff, prev: e.dictLongTable[h].offset, @@ -482,7 +547,7 @@ func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { off := 8 // First to read for i := e.maxMatchOff + 1; i < end; i++ { cv = cv>>8 | (uint64(d.content[off]) << 56) - h := hash8(cv, bestLongTableBits) + h := hashLen(cv, bestLongTableBits, bestLongLen) e.dictLongTable[h] = prevEntry{ offset: i, prev: e.dictLongTable[h].offset, diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index 604954290..8582f31a7 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -9,6 +9,7 @@ import "fmt" const ( betterLongTableBits = 19 // Bits used in the long match table betterLongTableSize = 1 << betterLongTableBits // Size of the table + betterLongLen = 8 // Bytes used for table hash // Note: Increasing the short table bits or making the hash shorter // can actually lead to compression degradation since it will 'steal' more from the @@ -16,6 +17,7 @@ const ( // This greatly depends on the type of input. betterShortTableBits = 13 // Bits used in the short match table betterShortTableSize = 1 << betterShortTableBits // Size of the table + betterShortLen = 5 // Bytes used for table hash betterLongTableShardCnt = 1 << (betterLongTableBits - dictShardBits) // Number of shards in the table betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard @@ -60,14 +62,10 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { ) // Protect against e.cur wraparound. 
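These hunks, and the matching ones in the other encoder files below, fold the fixed-width hash4x64/hash5/hash6/hash8 helpers into a single hashLen(value, tableBits, byteLen), while the dictionary loops keep a 64-bit window rolling forward one byte at a time. A sketch of the two ingredients under the usual multiplicative-hash scheme; the prime constant is the conventional one for 8-byte hashing and is shown for illustration:

	package zstdexample

	const prime8bytes = 0xcf1bbcdcb7a56463

	// hash8 sketches the 8-byte case of hashLen: multiply the whole
	// 64-bit window by a large odd constant, keep the top hashLog bits.
	func hash8(cv uint64, hashLog uint8) uint32 {
		return uint32((cv * prime8bytes) >> (64 - hashLog))
	}

	// advance slides the little-endian window forward one byte, as the
	// dictionary-initialization loops above do over d.content.
	func advance(cv uint64, next byte) uint64 {
		return cv>>8 | uint64(next)<<56
	}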
- for e.cur >= bufferReset { + for e.cur >= e.bufferReset-int32(len(e.hist)) { if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = prevEntry{} - } + e.table = [betterShortTableSize]tableEntry{} + e.longTable = [betterLongTableSize]prevEntry{} e.cur = e.maxMatchOff break } @@ -138,7 +136,7 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { blk.literals = append(blk.literals, src[nextEmit:until]...) s.litLen = uint32(until - nextEmit) } - if debug { + if debugEncoder { println("recent offsets:", blk.recentOffsets) } @@ -154,8 +152,8 @@ encodeLoop: panic("offset0 was 0") } - nextHashS := hash5(cv, betterShortTableBits) - nextHashL := hash8(cv, betterLongTableBits) + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] @@ -204,7 +202,7 @@ encodeLoop: nextEmit = s if s >= sLimit { - if debug { + if debugEncoder { println("repeat ended", s, lenght) } @@ -214,10 +212,10 @@ encodeLoop: for index0 < s-1 { cv0 := load6432(src, index0) cv1 := cv0 >> 8 - h0 := hash8(cv0, betterLongTableBits) + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) off := index0 + e.cur e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} index0 += 2 } cv = load6432(src, s) @@ -264,7 +262,7 @@ encodeLoop: s += lenght + repOff2 nextEmit = s if s >= sLimit { - if debug { + if debugEncoder { println("repeat ended", s, lenght) } @@ -275,10 +273,10 @@ encodeLoop: for index0 < s-1 { cv0 := load6432(src, index0) cv1 := cv0 >> 8 - h0 := hash8(cv0, betterLongTableBits) + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) off := index0 + e.cur e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} index0 += 2 } cv = load6432(src, s) @@ -353,7 +351,7 @@ encodeLoop: // See if we can find a long match at s+1 const checkAt = 1 cv := load6432(src, s+checkAt) - nextHashL = hash8(cv, betterLongTableBits) + nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) candidateL = e.longTable[nextHashL] coffsetL = candidateL.offset - e.cur @@ -413,16 +411,24 @@ encodeLoop: } // Try to find a better match by searching for a long match at the end of the current best match - if true && s+matched < sLimit { - nextHashL := hash8(load6432(src, s+matched), betterLongTableBits) - cv := load3232(src, s) + if s+matched < sLimit { + // Allow some bytes at the beginning to mismatch. + // Sweet spot is around 3 bytes, but depends on input. + // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. 
+ const skipBeginning = 3 + + nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) + s2 := s + skipBeginning + cv := load3232(src, s2) candidateL := e.longTable[nextHashL] - coffsetL := candidateL.offset - e.cur - matched - if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + coffsetL := candidateL.offset - e.cur - matched + skipBeginning + if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { // Found a long match, at least 4 bytes. - matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 if matchedNext > matched { t = coffsetL + s = s2 matched = matchedNext if debugMatches { println("long match at end-of-match") @@ -432,12 +438,13 @@ encodeLoop: // Check prev long... if true { - coffsetL = candidateL.prev - e.cur - matched - if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + coffsetL = candidateL.prev - e.cur - matched + skipBeginning + if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { // Found a long match, at least 4 bytes. - matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 if matchedNext > matched { t = coffsetL + s = s2 matched = matchedNext if debugMatches { println("prev long match at end-of-match") @@ -495,10 +502,10 @@ encodeLoop: for index0 < s-1 { cv0 := load6432(src, index0) cv1 := cv0 >> 8 - h0 := hash8(cv0, betterLongTableBits) + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) off := index0 + e.cur e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} index0 += 2 } @@ -516,8 +523,8 @@ encodeLoop: } // Store this, since we have it. - nextHashS := hash5(cv, betterShortTableBits) - nextHashL := hash8(cv, betterLongTableBits) + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) // We have at least 4 byte match. // No need to check backwards. We come straight from a match @@ -553,7 +560,7 @@ encodeLoop: } blk.recentOffsets[0] = uint32(offset1) blk.recentOffsets[1] = uint32(offset2) - if debug { + if debugEncoder { println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) } } @@ -576,7 +583,7 @@ func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) { ) // Protect against e.cur wraparound. - for e.cur >= bufferReset { + for e.cur >= e.bufferReset-int32(len(e.hist)) { if len(e.hist) == 0 { for i := range e.table[:] { e.table[i] = tableEntry{} @@ -656,7 +663,7 @@ func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) { blk.literals = append(blk.literals, src[nextEmit:until]...) 
s.litLen = uint32(until - nextEmit) } - if debug { + if debugEncoder { println("recent offsets:", blk.recentOffsets) } @@ -672,8 +679,8 @@ encodeLoop: panic("offset0 was 0") } - nextHashS := hash5(cv, betterShortTableBits) - nextHashL := hash8(cv, betterLongTableBits) + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] @@ -724,7 +731,7 @@ encodeLoop: nextEmit = s if s >= sLimit { - if debug { + if debugEncoder { println("repeat ended", s, lenght) } @@ -734,11 +741,11 @@ encodeLoop: for index0 < s-1 { cv0 := load6432(src, index0) cv1 := cv0 >> 8 - h0 := hash8(cv0, betterLongTableBits) + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) off := index0 + e.cur e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} e.markLongShardDirty(h0) - h1 := hash5(cv1, betterShortTableBits) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} e.markShortShardDirty(h1) index0 += 2 @@ -787,7 +794,7 @@ encodeLoop: s += lenght + repOff2 nextEmit = s if s >= sLimit { - if debug { + if debugEncoder { println("repeat ended", s, lenght) } @@ -798,11 +805,11 @@ encodeLoop: for index0 < s-1 { cv0 := load6432(src, index0) cv1 := cv0 >> 8 - h0 := hash8(cv0, betterLongTableBits) + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) off := index0 + e.cur e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} e.markLongShardDirty(h0) - h1 := hash5(cv1, betterShortTableBits) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} e.markShortShardDirty(h1) index0 += 2 @@ -879,7 +886,7 @@ encodeLoop: // See if we can find a long match at s+1 const checkAt = 1 cv := load6432(src, s+checkAt) - nextHashL = hash8(cv, betterLongTableBits) + nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) candidateL = e.longTable[nextHashL] coffsetL = candidateL.offset - e.cur @@ -940,7 +947,7 @@ encodeLoop: } // Try to find a better match by searching for a long match at the end of the current best match if s+matched < sLimit { - nextHashL := hash8(load6432(src, s+matched), betterLongTableBits) + nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) cv := load3232(src, s) candidateL := e.longTable[nextHashL] coffsetL := candidateL.offset - e.cur - matched @@ -1021,11 +1028,11 @@ encodeLoop: for index0 < s-1 { cv0 := load6432(src, index0) cv1 := cv0 >> 8 - h0 := hash8(cv0, betterLongTableBits) + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) off := index0 + e.cur e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} e.markLongShardDirty(h0) - h1 := hash5(cv1, betterShortTableBits) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} e.markShortShardDirty(h1) index0 += 2 @@ -1045,8 +1052,8 @@ encodeLoop: } // Store this, since we have it. - nextHashS := hash5(cv, betterShortTableBits) - nextHashL := hash8(cv, betterLongTableBits) + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) // We have at least 4 byte match. // No need to check backwards. 
We come straight from a match @@ -1084,7 +1091,7 @@ encodeLoop: } blk.recentOffsets[0] = uint32(offset1) blk.recentOffsets[1] = uint32(offset2) - if debug { + if debugEncoder { println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) } } @@ -1113,10 +1120,10 @@ func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { const hashLog = betterShortTableBits cv := load6432(d.content, i-e.maxMatchOff) - nextHash := hash5(cv, hashLog) // 0 -> 4 - nextHash1 := hash5(cv>>8, hashLog) // 1 -> 5 - nextHash2 := hash5(cv>>16, hashLog) // 2 -> 6 - nextHash3 := hash5(cv>>24, hashLog) // 3 -> 7 + nextHash := hashLen(cv, hashLog, betterShortLen) // 0 -> 4 + nextHash1 := hashLen(cv>>8, hashLog, betterShortLen) // 1 -> 5 + nextHash2 := hashLen(cv>>16, hashLog, betterShortLen) // 2 -> 6 + nextHash3 := hashLen(cv>>24, hashLog, betterShortLen) // 3 -> 7 e.dictTable[nextHash] = tableEntry{ val: uint32(cv), offset: i, @@ -1145,7 +1152,7 @@ func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { } if len(d.content) >= 8 { cv := load6432(d.content, 0) - h := hash8(cv, betterLongTableBits) + h := hashLen(cv, betterLongTableBits, betterLongLen) e.dictLongTable[h] = prevEntry{ offset: e.maxMatchOff, prev: e.dictLongTable[h].offset, @@ -1155,7 +1162,7 @@ func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { off := 8 // First to read for i := e.maxMatchOff + 1; i < end; i++ { cv = cv>>8 | (uint64(d.content[off]) << 56) - h := hash8(cv, betterLongTableBits) + h := hashLen(cv, betterLongTableBits, betterLongLen) e.dictLongTable[h] = prevEntry{ offset: i, prev: e.dictLongTable[h].offset, diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index 8629d43d8..7d425109a 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -10,6 +10,7 @@ const ( dFastLongTableBits = 17 // Bits used in the long match table dFastLongTableSize = 1 << dFastLongTableBits // Size of the table dFastLongTableMask = dFastLongTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + dFastLongLen = 8 // Bytes used for table hash dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard @@ -17,6 +18,8 @@ const ( dFastShortTableBits = tableBits // Bits used in the short match table dFastShortTableSize = 1 << dFastShortTableBits // Size of the table dFastShortTableMask = dFastShortTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + dFastShortLen = 5 // Bytes used for table hash + ) type doubleFastEncoder struct { @@ -41,14 +44,10 @@ func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) { ) // Protect against e.cur wraparound. - for e.cur >= bufferReset { + for e.cur >= e.bufferReset-int32(len(e.hist)) { if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = tableEntry{} - } + e.table = [dFastShortTableSize]tableEntry{} + e.longTable = [dFastLongTableSize]tableEntry{} e.cur = e.maxMatchOff break } @@ -109,7 +108,7 @@ func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) { blk.literals = append(blk.literals, src[nextEmit:until]...) 
s.litLen = uint32(until - nextEmit) } - if debug { + if debugEncoder { println("recent offsets:", blk.recentOffsets) } @@ -124,8 +123,8 @@ encodeLoop: panic("offset0 was 0") } - nextHashS := hash5(cv, dFastShortTableBits) - nextHashL := hash8(cv, dFastLongTableBits) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] @@ -170,7 +169,7 @@ encodeLoop: s += lenght + repOff nextEmit = s if s >= sLimit { - if debug { + if debugEncoder { println("repeat ended", s, lenght) } @@ -208,7 +207,7 @@ encodeLoop: // See if we can find a long match at s+1 const checkAt = 1 cv := load6432(src, s+checkAt) - nextHashL = hash8(cv, dFastLongTableBits) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) candidateL = e.longTable[nextHashL] coffsetL = s - (candidateL.offset - e.cur) + checkAt @@ -304,16 +303,16 @@ encodeLoop: cv1 := load6432(src, index1) te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - e.longTable[hash8(cv0, dFastLongTableBits)] = te0 - e.longTable[hash8(cv1, dFastLongTableBits)] = te1 + e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 + e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 cv0 >>= 8 cv1 >>= 8 te0.offset++ te1.offset++ te0.val = uint32(cv0) te1.val = uint32(cv1) - e.table[hash5(cv0, dFastShortTableBits)] = te0 - e.table[hash5(cv1, dFastShortTableBits)] = te1 + e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 + e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 cv = load6432(src, s) @@ -330,8 +329,8 @@ encodeLoop: } // Store this, since we have it. - nextHashS := hash5(cv, dFastShortTableBits) - nextHashL := hash8(cv, dFastLongTableBits) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) // We have at least 4 byte match. // No need to check backwards. We come straight from a match @@ -368,7 +367,7 @@ encodeLoop: } blk.recentOffsets[0] = uint32(offset1) blk.recentOffsets[1] = uint32(offset2) - if debug { + if debugEncoder { println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) } } @@ -385,7 +384,7 @@ func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { ) // Protect against e.cur wraparound. - if e.cur >= bufferReset { + if e.cur >= e.bufferReset { for i := range e.table[:] { e.table[i] = tableEntry{} } @@ -427,7 +426,7 @@ func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { blk.literals = append(blk.literals, src[nextEmit:until]...) 
s.litLen = uint32(until - nextEmit) } - if debug { + if debugEncoder { println("recent offsets:", blk.recentOffsets) } @@ -436,8 +435,8 @@ encodeLoop: var t int32 for { - nextHashS := hash5(cv, dFastShortTableBits) - nextHashL := hash8(cv, dFastLongTableBits) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] @@ -483,7 +482,7 @@ encodeLoop: s += length + repOff nextEmit = s if s >= sLimit { - if debug { + if debugEncoder { println("repeat ended", s, length) } @@ -521,7 +520,7 @@ encodeLoop: // See if we can find a long match at s+1 const checkAt = 1 cv := load6432(src, s+checkAt) - nextHashL = hash8(cv, dFastLongTableBits) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) candidateL = e.longTable[nextHashL] coffsetL = s - (candidateL.offset - e.cur) + checkAt @@ -614,16 +613,16 @@ encodeLoop: cv1 := load6432(src, index1) te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - e.longTable[hash8(cv0, dFastLongTableBits)] = te0 - e.longTable[hash8(cv1, dFastLongTableBits)] = te1 + e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 + e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 cv0 >>= 8 cv1 >>= 8 te0.offset++ te1.offset++ te0.val = uint32(cv0) te1.val = uint32(cv1) - e.table[hash5(cv0, dFastShortTableBits)] = te0 - e.table[hash5(cv1, dFastShortTableBits)] = te1 + e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 + e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 cv = load6432(src, s) @@ -640,8 +639,8 @@ encodeLoop: } // Store this, since we have it. - nextHashS := hash5(cv1>>8, dFastShortTableBits) - nextHashL := hash8(cv, dFastLongTableBits) + nextHashS := hashLen(cv1>>8, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) // We have at least 4 byte match. // No need to check backwards. We come straight from a match @@ -677,12 +676,12 @@ encodeLoop: blk.literals = append(blk.literals, src[nextEmit:]...) blk.extraLits = len(src) - int(nextEmit) } - if debug { + if debugEncoder { println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) } // We do not store history, so we must offset e.cur to avoid false matches for next user. - if e.cur < bufferReset { + if e.cur < e.bufferReset { e.cur += int32(len(src)) } } @@ -697,7 +696,7 @@ func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) { ) // Protect against e.cur wraparound. - for e.cur >= bufferReset { + for e.cur >= e.bufferReset-int32(len(e.hist)) { if len(e.hist) == 0 { for i := range e.table[:] { e.table[i] = tableEntry{} @@ -767,7 +766,7 @@ func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) { blk.literals = append(blk.literals, src[nextEmit:until]...) 
s.litLen = uint32(until - nextEmit) } - if debug { + if debugEncoder { println("recent offsets:", blk.recentOffsets) } @@ -782,8 +781,8 @@ encodeLoop: panic("offset0 was 0") } - nextHashS := hash5(cv, dFastShortTableBits) - nextHashL := hash8(cv, dFastLongTableBits) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] @@ -830,7 +829,7 @@ encodeLoop: s += lenght + repOff nextEmit = s if s >= sLimit { - if debug { + if debugEncoder { println("repeat ended", s, lenght) } @@ -868,7 +867,7 @@ encodeLoop: // See if we can find a long match at s+1 const checkAt = 1 cv := load6432(src, s+checkAt) - nextHashL = hash8(cv, dFastLongTableBits) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) candidateL = e.longTable[nextHashL] coffsetL = s - (candidateL.offset - e.cur) + checkAt @@ -965,8 +964,8 @@ encodeLoop: cv1 := load6432(src, index1) te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - longHash1 := hash8(cv0, dFastLongTableBits) - longHash2 := hash8(cv0, dFastLongTableBits) + longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen) + longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen) e.longTable[longHash1] = te0 e.longTable[longHash2] = te1 e.markLongShardDirty(longHash1) @@ -977,8 +976,8 @@ encodeLoop: te1.offset++ te0.val = uint32(cv0) te1.val = uint32(cv1) - hashVal1 := hash5(cv0, dFastShortTableBits) - hashVal2 := hash5(cv1, dFastShortTableBits) + hashVal1 := hashLen(cv0, dFastShortTableBits, dFastShortLen) + hashVal2 := hashLen(cv1, dFastShortTableBits, dFastShortLen) e.table[hashVal1] = te0 e.markShardDirty(hashVal1) e.table[hashVal2] = te1 @@ -999,8 +998,8 @@ encodeLoop: } // Store this, since we have it. - nextHashS := hash5(cv, dFastShortTableBits) - nextHashL := hash8(cv, dFastLongTableBits) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) // We have at least 4 byte match. // No need to check backwards. We come straight from a match @@ -1039,7 +1038,7 @@ encodeLoop: } blk.recentOffsets[0] = uint32(offset1) blk.recentOffsets[1] = uint32(offset2) - if debug { + if debugEncoder { println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) } // If we encoded more than 64K mark all dirty. 
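For context on the markShardDirty calls threaded through the dictionary encoders above: their match tables are split into fixed-size shards, each write flags its shard, and Reset restores only the flagged shards from the pristine dictionary copy. The Reset hunks that follow also swap copy() for Go 1.17 slice-to-array-pointer conversions, which assign a whole shard in one block move. A reduced sketch with a hypothetical uint32 entry type:

	package zstdexample

	const (
		tableBits = 15
		tableSize = 1 << tableBits
		shardCnt  = 1 << 6
		shardSize = tableSize / shardCnt
	)

	type shardedTable struct {
		table [tableSize]uint32
		dirty [shardCnt]bool
	}

	// mark flags the shard containing table index h as modified.
	func (t *shardedTable) mark(h uint32) { t.dirty[h/shardSize] = true }

	// reset restores dirty shards from dict (len(dict) >= tableSize),
	// using the slice-to-array-pointer conversion the patch introduces.
	func (t *shardedTable) reset(dict []uint32) {
		for i := range t.dirty {
			if !t.dirty[i] {
				continue
			}
			*(*[shardSize]uint32)(t.table[i*shardSize:]) = *(*[shardSize]uint32)(dict[i*shardSize:])
			t.dirty[i] = false
		}
	}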
@@ -1071,14 +1070,14 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { } if len(d.content) >= 8 { cv := load6432(d.content, 0) - e.dictLongTable[hash8(cv, dFastLongTableBits)] = tableEntry{ + e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ val: uint32(cv), offset: e.maxMatchOff, } end := int32(len(d.content)) - 8 + e.maxMatchOff for i := e.maxMatchOff + 1; i < end; i++ { cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56) - e.dictLongTable[hash8(cv, dFastLongTableBits)] = tableEntry{ + e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ val: uint32(cv), offset: i, } @@ -1100,7 +1099,8 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { } if allDirty || dirtyShardCnt > dLongTableShardCnt/2 { - copy(e.longTable[:], e.dictLongTable) + //copy(e.longTable[:], e.dictLongTable) + e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable) for i := range e.longTableShardDirty { e.longTableShardDirty[i] = false } @@ -1111,7 +1111,9 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { continue } - copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize]) + // copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize]) + *(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:]) + e.longTableShardDirty[i] = false } } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go index ba4a17e10..315b1a8f2 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -6,17 +6,16 @@ package zstd import ( "fmt" - "math" - "math/bits" ) const ( - tableBits = 15 // Bits used in the table - tableSize = 1 << tableBits // Size of the table - tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table - tableShardSize = tableSize / tableShardCnt // Size of an individual shard - tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. - maxMatchLength = 131074 + tableBits = 15 // Bits used in the table + tableSize = 1 << tableBits // Size of the table + tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table + tableShardSize = tableSize / tableShardCnt // Size of an individual shard + tableFastHashLen = 6 + tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + maxMatchLength = 131074 ) type tableEntry struct { @@ -44,7 +43,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { ) // Protect against e.cur wraparound. - for e.cur >= bufferReset { + for e.cur >= e.bufferReset-int32(len(e.hist)) { if len(e.hist) == 0 { for i := range e.table[:] { e.table[i] = tableEntry{} @@ -86,7 +85,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { // TEMPLATE const hashLog = tableBits // seems global, but would be nice to tweak. - const kSearchStrength = 7 + const kSearchStrength = 6 // nextEmit is where in src the next emitLiteral should start from. nextEmit := s @@ -103,7 +102,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { blk.literals = append(blk.literals, src[nextEmit:until]...) 
s.litLen = uint32(until - nextEmit) } - if debug { + if debugEncoder { println("recent offsets:", blk.recentOffsets) } @@ -122,8 +121,8 @@ encodeLoop: panic("offset0 was 0") } - nextHash := hash6(cv, hashLog) - nextHash2 := hash6(cv>>8, hashLog) + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) candidate := e.table[nextHash] candidate2 := e.table[nextHash2] repIndex := s - offset1 + 2 @@ -135,20 +134,7 @@ encodeLoop: // Consider history as well. var seq seq var length int32 - // length = 4 + e.matchlen(s+6, repIndex+4, src) - { - a := src[s+6:] - b := src[repIndex+4:] - endI := len(a) & (math.MaxInt32 - 7) - length = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - length = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } - + length = 4 + e.matchlen(s+6, repIndex+4, src) seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. @@ -178,7 +164,7 @@ encodeLoop: s += length + 2 nextEmit = s if s >= sLimit { - if debug { + if debugEncoder { println("repeat ended", s, length) } @@ -235,20 +221,7 @@ encodeLoop: } // Extend the 4-byte match as long as possible. - //l := e.matchlen(s+4, t+4, src) + 4 - var l int32 - { - a := src[s+4:] - b := src[t+4:] - endI := len(a) & (math.MaxInt32 - 7) - l = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } + l := e.matchlen(s+4, t+4, src) + 4 // Extend backwards tMin := s - e.maxMatchOff @@ -285,23 +258,10 @@ encodeLoop: if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { // We have at least 4 byte match. // No need to check backwards. We come straight from a match - //l := 4 + e.matchlen(s+4, o2+4, src) - var l int32 - { - a := src[s+4:] - b := src[o2+4:] - endI := len(a) & (math.MaxInt32 - 7) - l = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } + l := 4 + e.matchlen(s+4, o2+4, src) // Store this, since we have it. - nextHash := hash6(cv, hashLog) + nextHash := hashLen(cv, hashLog, tableFastHashLen) e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} seq.matchLen = uint32(l) - zstdMinMatch seq.litLen = 0 @@ -330,7 +290,7 @@ encodeLoop: } blk.recentOffsets[0] = uint32(offset1) blk.recentOffsets[1] = uint32(offset2) - if debug { + if debugEncoder { println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) } } @@ -343,14 +303,14 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { inputMargin = 8 minNonLiteralBlockSize = 1 + 1 + inputMargin ) - if debug { - if len(src) > maxBlockSize { + if debugEncoder { + if len(src) > maxCompressedBlockSize { panic("src too big") } } // Protect against e.cur wraparound. - if e.cur >= bufferReset { + if e.cur >= e.bufferReset { for i := range e.table[:] { e.table[i] = tableEntry{} } @@ -374,7 +334,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { // TEMPLATE const hashLog = tableBits // seems global, but would be nice to tweak. - const kSearchStrength = 8 + const kSearchStrength = 6 // nextEmit is where in src the next emitLiteral should start from. nextEmit := s @@ -391,7 +351,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { blk.literals = append(blk.literals, src[nextEmit:until]...) 
s.litLen = uint32(until - nextEmit) } - if debug { + if debugEncoder { println("recent offsets:", blk.recentOffsets) } @@ -405,8 +365,8 @@ encodeLoop: // By not using them for the first 3 matches for { - nextHash := hash6(cv, hashLog) - nextHash2 := hash6(cv>>8, hashLog) + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) candidate := e.table[nextHash] candidate2 := e.table[nextHash2] repIndex := s - offset1 + 2 @@ -417,21 +377,7 @@ encodeLoop: if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) { // Consider history as well. var seq seq - // length := 4 + e.matchlen(s+6, repIndex+4, src) - // length := 4 + int32(matchLen(src[s+6:], src[repIndex+4:])) - var length int32 - { - a := src[s+6:] - b := src[repIndex+4:] - endI := len(a) & (math.MaxInt32 - 7) - length = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - length = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } + length := 4 + e.matchlen(s+6, repIndex+4, src) seq.matchLen = uint32(length - zstdMinMatch) @@ -462,7 +408,7 @@ encodeLoop: s += length + 2 nextEmit = s if s >= sLimit { - if debug { + if debugEncoder { println("repeat ended", s, length) } @@ -521,21 +467,7 @@ encodeLoop: panic(fmt.Sprintf("t (%d) < 0 ", t)) } // Extend the 4-byte match as long as possible. - //l := e.matchlenNoHist(s+4, t+4, src) + 4 - // l := int32(matchLen(src[s+4:], src[t+4:])) + 4 - var l int32 - { - a := src[s+4:] - b := src[t+4:] - endI := len(a) & (math.MaxInt32 - 7) - l = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } + l := e.matchlen(s+4, t+4, src) + 4 // Extend backwards tMin := s - e.maxMatchOff @@ -572,24 +504,10 @@ encodeLoop: if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) { // We have at least 4 byte match. // No need to check backwards. We come straight from a match - //l := 4 + e.matchlenNoHist(s+4, o2+4, src) - // l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) - var l int32 - { - a := src[s+4:] - b := src[o2+4:] - endI := len(a) & (math.MaxInt32 - 7) - l = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } + l := 4 + e.matchlen(s+4, o2+4, src) // Store this, since we have it. - nextHash := hash6(cv, hashLog) + nextHash := hashLen(cv, hashLog, tableFastHashLen) e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} seq.matchLen = uint32(l) - zstdMinMatch seq.litLen = 0 @@ -616,11 +534,11 @@ encodeLoop: blk.literals = append(blk.literals, src[nextEmit:]...) blk.extraLits = len(src) - int(nextEmit) } - if debug { + if debugEncoder { println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) } // We do not store history, so we must offset e.cur to avoid false matches for next user. - if e.cur < bufferReset { + if e.cur < e.bufferReset { e.cur += int32(len(src)) } } @@ -637,11 +555,9 @@ func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) { return } // Protect against e.cur wraparound. 
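Every inline loop deleted in this file was a copy of the same word-at-a-time comparison, now routed through e.matchlen. A self-contained sketch of the technique, not the package's exact implementation: XOR eight bytes at a time, and when a word differs, the trailing zero count divided by 8 locates the first mismatching byte. The second helper shows the backwards extension that lets the skipBeginning probes earlier in the patch skip a few leading bytes and still recover them:

	package zstdexample

	import (
		"encoding/binary"
		"math/bits"
	)

	// matchLen returns how many leading bytes a and b share.
	func matchLen(a, b []byte) (n int) {
		for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
			if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
				// Bit position of the first set bit, divided by 8,
				// is the byte offset of the first mismatch.
				return n + bits.TrailingZeros64(diff)>>3
			}
			n += 8
		}
		for i := range a {
			if i >= len(b) || a[i] != b[i] {
				break
			}
			n++
		}
		return n
	}

	// extendBack widens a match starting at s (candidate at t) while the
	// preceding bytes also agree, bounded by tMin and the emit position.
	func extendBack(src []byte, s, t, tMin, nextEmit int32) (int32, int32) {
		for t > tMin && s > nextEmit && src[t-1] == src[s-1] {
			s--
			t--
		}
		return s, t
	}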
- for e.cur >= bufferReset { + for e.cur >= e.bufferReset-int32(len(e.hist)) { if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } + e.table = [tableSize]tableEntry{} e.cur = e.maxMatchOff break } @@ -696,7 +612,7 @@ func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) { blk.literals = append(blk.literals, src[nextEmit:until]...) s.litLen = uint32(until - nextEmit) } - if debug { + if debugEncoder { println("recent offsets:", blk.recentOffsets) } @@ -715,8 +631,8 @@ encodeLoop: panic("offset0 was 0") } - nextHash := hash6(cv, hashLog) - nextHash2 := hash6(cv>>8, hashLog) + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) candidate := e.table[nextHash] candidate2 := e.table[nextHash2] repIndex := s - offset1 + 2 @@ -730,19 +646,7 @@ encodeLoop: // Consider history as well. var seq seq var length int32 - // length = 4 + e.matchlen(s+6, repIndex+4, src) - { - a := src[s+6:] - b := src[repIndex+4:] - endI := len(a) & (math.MaxInt32 - 7) - length = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - length = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } + length = 4 + e.matchlen(s+6, repIndex+4, src) seq.matchLen = uint32(length - zstdMinMatch) @@ -773,7 +677,7 @@ encodeLoop: s += length + 2 nextEmit = s if s >= sLimit { - if debug { + if debugEncoder { println("repeat ended", s, length) } @@ -830,20 +734,7 @@ encodeLoop: } // Extend the 4-byte match as long as possible. - //l := e.matchlen(s+4, t+4, src) + 4 - var l int32 - { - a := src[s+4:] - b := src[t+4:] - endI := len(a) & (math.MaxInt32 - 7) - l = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } + l := e.matchlen(s+4, t+4, src) + 4 // Extend backwards tMin := s - e.maxMatchOff @@ -880,23 +771,10 @@ encodeLoop: if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { // We have at least 4 byte match. // No need to check backwards. We come straight from a match - //l := 4 + e.matchlen(s+4, o2+4, src) - var l int32 - { - a := src[s+4:] - b := src[o2+4:] - endI := len(a) & (math.MaxInt32 - 7) - l = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } + l := 4 + e.matchlen(s+4, o2+4, src) // Store this, since we have it. 
- nextHash := hash6(cv, hashLog) + nextHash := hashLen(cv, hashLog, tableFastHashLen) e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} e.markShardDirty(nextHash) seq.matchLen = uint32(l) - zstdMinMatch @@ -926,7 +804,7 @@ encodeLoop: } blk.recentOffsets[0] = uint32(offset1) blk.recentOffsets[1] = uint32(offset2) - if debug { + if debugEncoder { println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) } } @@ -957,9 +835,9 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { const hashLog = tableBits cv := load6432(d.content, i-e.maxMatchOff) - nextHash := hash6(cv, hashLog) // 0 -> 5 - nextHash1 := hash6(cv>>8, hashLog) // 1 -> 6 - nextHash2 := hash6(cv>>16, hashLog) // 2 -> 7 + nextHash := hashLen(cv, hashLog, tableFastHashLen) // 0 -> 5 + nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 6 + nextHash2 := hashLen(cv>>16, hashLog, tableFastHashLen) // 2 -> 7 e.dictTable[nextHash] = tableEntry{ val: uint32(cv), offset: i, @@ -991,7 +869,8 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { const shardCnt = tableShardCnt const shardSize = tableShardSize if e.allDirty || dirtyShardCnt > shardCnt*4/6 { - copy(e.table[:], e.dictTable) + //copy(e.table[:], e.dictTable) + e.table = *(*[tableSize]tableEntry)(e.dictTable) for i := range e.tableShardDirty { e.tableShardDirty[i] = false } @@ -1003,7 +882,8 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { continue } - copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) + //copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) + *(*[shardSize]tableEntry)(e.table[i*shardSize:]) = *(*[shardSize]tableEntry)(e.dictTable[i*shardSize:]) e.tableShardDirty[i] = false } e.allDirty = false diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index 4871dd03a..65c6c36dc 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -8,6 +8,7 @@ import ( "crypto/rand" "fmt" "io" + "math" rdebug "runtime/debug" "sync" @@ -33,7 +34,7 @@ type encoder interface { Block() *blockEnc CRC() *xxhash.Digest AppendCRC([]byte) []byte - WindowSize(size int) int32 + WindowSize(size int64) int32 UseBlock(*blockEnc) Reset(d *dict, singleBlock bool) } @@ -48,6 +49,8 @@ type encoderState struct { err error writeErr error nWritten int64 + nInput int64 + frameContentSize int64 headerWritten bool eofWritten bool fullFrameWritten bool @@ -96,23 +99,25 @@ func (e *Encoder) Reset(w io.Writer) { if cap(s.filling) == 0 { s.filling = make([]byte, 0, e.o.blockSize) } - if cap(s.current) == 0 { - s.current = make([]byte, 0, e.o.blockSize) - } - if cap(s.previous) == 0 { - s.previous = make([]byte, 0, e.o.blockSize) + if e.o.concurrent > 1 { + if cap(s.current) == 0 { + s.current = make([]byte, 0, e.o.blockSize) + } + if cap(s.previous) == 0 { + s.previous = make([]byte, 0, e.o.blockSize) + } + s.current = s.current[:0] + s.previous = s.previous[:0] + if s.writing == nil { + s.writing = &blockEnc{lowMem: e.o.lowMem} + s.writing.init() + } + s.writing.initNewEncode() } if s.encoder == nil { s.encoder = e.o.encoder() } - if s.writing == nil { - s.writing = &blockEnc{lowMem: e.o.lowMem} - s.writing.init() - } - s.writing.initNewEncode() s.filling = s.filling[:0] - s.current = s.current[:0] - s.previous = s.previous[:0] s.encoder.Reset(e.o.dict, false) s.headerWritten = false s.eofWritten = false @@ 
-120,7 +125,21 @@ func (e *Encoder) Reset(w io.Writer) { s.w = w s.err = nil s.nWritten = 0 + s.nInput = 0 s.writeErr = nil + s.frameContentSize = 0 +} + +// ResetContentSize will reset and set a content size for the next stream. +// If the bytes written does not match the size given an error will be returned +// when calling Close(). +// This is removed when Reset is called. +// Sizes <= 0 results in no content size set. +func (e *Encoder) ResetContentSize(w io.Writer, size int64) { + e.Reset(w) + if size >= 0 { + e.state.frameContentSize = size + } } // Write data to the encoder. @@ -190,6 +209,7 @@ func (e *Encoder) nextBlock(final bool) error { return s.err } s.nWritten += int64(n2) + s.nInput += int64(len(s.filling)) s.current = s.current[:0] s.filling = s.filling[:0] s.headerWritten = true @@ -200,8 +220,8 @@ func (e *Encoder) nextBlock(final bool) error { var tmp [maxHeaderSize]byte fh := frameHeader{ - ContentSize: 0, - WindowSize: uint32(s.encoder.WindowSize(0)), + ContentSize: uint64(s.frameContentSize), + WindowSize: uint32(s.encoder.WindowSize(s.frameContentSize)), SingleSegment: false, Checksum: e.o.crc, DictID: e.o.dict.ID(), @@ -241,11 +261,52 @@ func (e *Encoder) nextBlock(final bool) error { return s.err } + // SYNC: + if e.o.concurrent == 1 { + src := s.filling + s.nInput += int64(len(s.filling)) + if debugEncoder { + println("Adding sync block,", len(src), "bytes, final:", final) + } + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + enc.Encode(blk, src) + blk.last = final + if final { + s.eofWritten = true + } + + err := errIncompressible + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + if len(src) != len(blk.literals) || len(src) != e.o.blockSize { + err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + } + switch err { + case errIncompressible: + if debugEncoder { + println("Storing incompressible block as raw") + } + blk.encodeRaw(src) + // In fast mode, we do not transfer offsets, so we don't have to deal with changing the. + case nil: + default: + s.err = err + return err + } + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.filling = s.filling[:0] + return s.err + } + // Move blocks forward. s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current + s.nInput += int64(len(s.current)) s.wg.Add(1) go func(src []byte) { - if debug { + if debugEncoder { println("Adding block,", len(src), "bytes, final:", final) } defer func() { @@ -290,7 +351,7 @@ func (e *Encoder) nextBlock(final bool) error { } switch err { case errIncompressible: - if debug { + if debugEncoder { println("Storing incompressible block as raw") } blk.encodeRaw(src) @@ -313,7 +374,7 @@ func (e *Encoder) nextBlock(final bool) error { // // The Copy function uses ReaderFrom if available. 
func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { - if debug { + if debugEncoder { println("Using ReadFrom") } @@ -336,20 +397,20 @@ func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { switch err { case io.EOF: e.state.filling = e.state.filling[:len(e.state.filling)-len(src)] - if debug { + if debugEncoder { println("ReadFrom: got EOF final block:", len(e.state.filling)) } return n, nil case nil: default: - if debug { + if debugEncoder { println("ReadFrom: got error:", err) } e.state.err = err return n, err } if len(src) > 0 { - if debug { + if debugEncoder { println("ReadFrom: got space left in source:", len(src)) } continue @@ -394,6 +455,11 @@ func (e *Encoder) Close() error { if err != nil { return err } + if s.frameContentSize > 0 { + if s.nInput != s.frameContentSize { + return fmt.Errorf("frame content size %d given, but %d bytes was written", s.frameContentSize, s.nInput) + } + } if e.state.fullFrameWritten { return s.err } @@ -463,14 +529,14 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { // If a non-single block is needed the encoder will reset again. e.encoders <- enc }() - // Use single segments when above minimum window and below 1MB. - single := len(src) < 1<<20 && len(src) > MinWindowSize + // Use single segments when above minimum window and below window size. + single := len(src) <= e.o.windowSize && len(src) > MinWindowSize if e.o.single != nil { single = *e.o.single } fh := frameHeader{ ContentSize: uint64(len(src)), - WindowSize: uint32(enc.WindowSize(len(src))), + WindowSize: uint32(enc.WindowSize(int64(len(src)))), SingleSegment: single, Checksum: e.o.crc, DictID: e.o.dict.ID(), @@ -486,7 +552,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { } // If we can do everything in one block, prefer that. - if len(src) <= maxCompressedBlockSize { + if len(src) <= e.o.blockSize { enc.Reset(e.o.dict, true) // Slightly faster with no history and everything in one block. if e.o.crc { @@ -512,7 +578,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { switch err { case errIncompressible: - if debug { + if debugEncoder { println("Storing incompressible block as raw") } dst = blk.encodeRawTo(dst, src) @@ -548,7 +614,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { switch err { case errIncompressible: - if debug { + if debugEncoder { println("Storing incompressible block as raw") } dst = blk.encodeRawTo(dst, todo) @@ -574,3 +640,37 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { } return dst } + +// MaxEncodedSize returns the expected maximum +// size of an encoded block or stream. +func (e *Encoder) MaxEncodedSize(size int) int { + frameHeader := 4 + 2 // magic + frame header & window descriptor + if e.o.dict != nil { + frameHeader += 4 + } + // Frame content size: + if size < 256 { + frameHeader++ + } else if size < 65536+256 { + frameHeader += 2 + } else if size < math.MaxInt32 { + frameHeader += 4 + } else { + frameHeader += 8 + } + // Final crc + if e.o.crc { + frameHeader += 4 + } + + // Max overhead is 3 bytes/block. + // There cannot be 0 blocks. + blocks := (size + e.o.blockSize) / e.o.blockSize + + // Combine, add padding. 
+ maxSz := frameHeader + 3*blocks + size + if e.o.pad > 1 { + maxSz += calcSkippableFrame(int64(maxSz), int64(e.o.pad)) + } + return maxSz +} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go index 16d4ab63c..6015f498a 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -3,6 +3,7 @@ package zstd import ( "errors" "fmt" + "math" "runtime" "strings" ) @@ -24,6 +25,7 @@ type encoderOptions struct { allLitEntropy bool customWindow bool customALEntropy bool + customBlockSize bool lowMem bool dict *dict } @@ -33,7 +35,7 @@ func (o *encoderOptions) setDefault() { concurrent: runtime.GOMAXPROCS(0), crc: true, single: nil, - blockSize: 1 << 16, + blockSize: maxCompressedBlockSize, windowSize: 8 << 20, level: SpeedDefault, allLitEntropy: true, @@ -46,22 +48,22 @@ func (o encoderOptions) encoder() encoder { switch o.level { case SpeedFastest: if o.dict != nil { - return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} + return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} } - return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} + return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} case SpeedDefault: if o.dict != nil { - return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}} + return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}} } - return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} + return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} case SpeedBetterCompression: if o.dict != nil { - return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} + return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} } - return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} + return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} case SpeedBestCompression: - return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} + return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} } panic("unknown compression level") } @@ -75,6 +77,7 @@ func WithEncoderCRC(b bool) EOption { // WithEncoderConcurrency will set the concurrency, // meaning the maximum number of encoders to run concurrently. // The value supplied must be at least 1. +// For streams, setting a value of 1 will disable async compression. 
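A hedged sketch combining two of the additions above: WithEncoderConcurrency(1) selects the new synchronous path, and MaxEncodedSize bounds the destination so EncodeAll should not need to reallocate (input is hypothetical):

```go
package main

import (
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Concurrency 1 disables async compression, per the option comment above.
	enc, err := zstd.NewWriter(nil, zstd.WithEncoderConcurrency(1))
	if err != nil {
		log.Fatal(err)
	}
	defer enc.Close()

	src := []byte("data to compress") // hypothetical input
	// MaxEncodedSize gives a worst-case bound, so the append inside
	// EncodeAll should never have to grow dst.
	dst := make([]byte, 0, enc.MaxEncodedSize(len(src)))
	dst = enc.EncodeAll(src, dst)
	_ = dst
}
```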
// By default this will be set to GOMAXPROCS. func WithEncoderConcurrency(n int) EOption { return func(o *encoderOptions) error { @@ -106,6 +109,7 @@ func WithWindowSize(n int) EOption { o.customWindow = true if o.blockSize > o.windowSize { o.blockSize = o.windowSize + o.customBlockSize = true } return nil } @@ -188,10 +192,9 @@ func EncoderLevelFromZstd(level int) EncoderLevel { return SpeedDefault case level >= 6 && level < 10: return SpeedBetterCompression - case level >= 10: - return SpeedBetterCompression + default: + return SpeedBestCompression } - return SpeedDefault } // String provides a string representation of the compression level. @@ -222,6 +225,9 @@ func WithEncoderLevel(l EncoderLevel) EOption { switch o.level { case SpeedFastest: o.windowSize = 4 << 20 + if !o.customBlockSize { + o.blockSize = 1 << 16 + } case SpeedDefault: o.windowSize = 8 << 20 case SpeedBetterCompression: @@ -278,7 +284,7 @@ func WithNoEntropyCompression(b bool) EOption { // a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range. // For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB. // This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations. -// If this is not specified, block encodes will automatically choose this based on the input size. +// If this is not specified, block encodes will automatically choose this based on the input size and the window size. // This setting has no effect on streamed encodes. func WithSingleSegment(b bool) EOption { return func(o *encoderOptions) error { diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index 4dc151213..65984bf07 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -5,30 +5,20 @@ package zstd import ( - "bytes" + "encoding/binary" "encoding/hex" "errors" - "hash" "io" - "sync" "github.com/klauspost/compress/zstd/internal/xxhash" ) type frameDec struct { - o decoderOptions - crc hash.Hash64 - offset int64 + o decoderOptions + crc *xxhash.Digest WindowSize uint64 - // maxWindowSize is the maximum windows size to support. - // should never be bigger than max-int. - maxWindowSize uint64 - - // In order queue of blocks being decoded. - decoding chan *blockDec - // Frame history passed between blocks history history @@ -38,35 +28,32 @@ type frameDec struct { bBuf byteBuf FrameContentSize uint64 - frameDone sync.WaitGroup DictionaryID *uint32 HasCheckSum bool SingleSegment bool - - // asyncRunning indicates whether the async routine processes input on 'decoding'. - asyncRunningMu sync.Mutex - asyncRunning bool } const ( - // The minimum Window_Size is 1 KB. + // MinWindowSize is the minimum Window Size, which is 1 KB. MinWindowSize = 1 << 10 + + // MaxWindowSize is the maximum encoder window size + // and the default decoder maximum window size. 
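The EncoderLevelFromZstd change above reroutes levels >= 10 to SpeedBestCompression (they previously fell through to SpeedBetterCompression). A minimal caller-side sketch:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Levels >= 10 now map to SpeedBestCompression.
	lvl := zstd.EncoderLevelFromZstd(19)
	fmt.Println(lvl) // prints "best"

	var buf bytes.Buffer
	enc, err := zstd.NewWriter(&buf, zstd.WithEncoderLevel(lvl))
	if err != nil {
		log.Fatal(err)
	}
	defer enc.Close()
}
```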
MaxWindowSize = 1 << 29 ) -var ( - frameMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} - skippableFrameMagic = []byte{0x2a, 0x4d, 0x18} +const ( + frameMagic = "\x28\xb5\x2f\xfd" + skippableFrameMagic = "\x2a\x4d\x18" ) func newFrameDec(o decoderOptions) *frameDec { - d := frameDec{ - o: o, - maxWindowSize: MaxWindowSize, + if o.maxWindowSize > o.maxDecodedSize { + o.maxWindowSize = o.maxDecodedSize } - if d.maxWindowSize > o.maxDecodedSize { - d.maxWindowSize = o.maxDecodedSize + d := frameDec{ + o: o, } return &d } @@ -78,20 +65,33 @@ func newFrameDec(o decoderOptions) *frameDec { func (d *frameDec) reset(br byteBuffer) error { d.HasCheckSum = false d.WindowSize = 0 - var b []byte + var signature [4]byte for { var err error - b, err = br.readSmall(4) + // Check if we can read more... + b, err := br.readSmall(1) switch err { case io.EOF, io.ErrUnexpectedEOF: return io.EOF default: return err case nil: + signature[0] = b[0] } - if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 { - if debug { - println("Not skippable", hex.EncodeToString(b), hex.EncodeToString(skippableFrameMagic)) + // Read the rest, don't allow io.ErrUnexpectedEOF + b, err = br.readSmall(3) + switch err { + case io.EOF: + return io.EOF + default: + return err + case nil: + copy(signature[1:], b) + } + + if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 { + if debugDecoder { + println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString([]byte(skippableFrameMagic))) } // Break if not skippable frame. break @@ -99,28 +99,34 @@ func (d *frameDec) reset(br byteBuffer) error { // Read size to skip b, err = br.readSmall(4) if err != nil { - println("Reading Frame Size", err) + if debugDecoder { + println("Reading Frame Size", err) + } return err } n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) println("Skipping frame with", n, "bytes.") - err = br.skipN(int(n)) + err = br.skipN(int64(n)) if err != nil { - if debug { + if debugDecoder { println("Reading discarded frame", err) } return err } } - if !bytes.Equal(b, frameMagic) { - println("Got magic numbers: ", b, "want:", frameMagic) + if string(signature[:]) != frameMagic { + if debugDecoder { + println("Got magic numbers: ", signature, "want:", []byte(frameMagic)) + } return ErrMagicMismatch } // Read Frame_Header_Descriptor fhd, err := br.readByte() if err != nil { - println("Reading Frame_Header_Descriptor", err) + if debugDecoder { + println("Reading Frame_Header_Descriptor", err) + } return err } d.SingleSegment = fhd&(1<<5) != 0 @@ -135,7 +141,9 @@ func (d *frameDec) reset(br byteBuffer) error { if !d.SingleSegment { wd, err := br.readByte() if err != nil { - println("Reading Window_Descriptor", err) + if debugDecoder { + println("Reading Window_Descriptor", err) + } return err } printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) @@ -153,13 +161,13 @@ func (d *frameDec) reset(br byteBuffer) error { size = 4 } - b, err = br.readSmall(int(size)) + b, err := br.readSmall(int(size)) if err != nil { println("Reading Dictionary_ID", err) return err } var id uint32 - switch size { + switch len(b) { case 1: id = uint32(b[0]) case 2: @@ -167,7 +175,7 @@ func (d *frameDec) reset(br byteBuffer) error { case 4: id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) } - if debug { + if debugDecoder { println("Dict size", size, "ID:", id) } if id > 0 { @@ -189,14 +197,14 @@ func (d *frameDec) reset(br byteBuffer) error { default: fcsSize = 1 << v } - 
d.FrameContentSize = 0 + d.FrameContentSize = fcsUnknown if fcsSize > 0 { - b, err = br.readSmall(fcsSize) + b, err := br.readSmall(fcsSize) if err != nil { println("Reading Frame content", err) return err } - switch fcsSize { + switch len(b) { case 1: d.FrameContentSize = uint64(b[0]) case 2: @@ -209,10 +217,11 @@ func (d *frameDec) reset(br byteBuffer) error { d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) } - if debug { - println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]), "singleseg:", d.SingleSegment, "window:", d.WindowSize) + if debugDecoder { + println("Read FCS:", d.FrameContentSize) } } + // Move this to shared. d.HasCheckSum = fhd&(1<<2) != 0 if d.HasCheckSum { @@ -222,29 +231,52 @@ func (d *frameDec) reset(br byteBuffer) error { d.crc.Reset() } + if d.WindowSize > d.o.maxWindowSize { + if debugDecoder { + printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + } + return ErrWindowSizeExceeded + } + if d.WindowSize == 0 && d.SingleSegment { // We may not need window in this case. d.WindowSize = d.FrameContentSize if d.WindowSize < MinWindowSize { d.WindowSize = MinWindowSize } + if d.WindowSize > d.o.maxDecodedSize { + if debugDecoder { + printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + } + return ErrDecoderSizeExceeded + } } - if d.WindowSize > d.maxWindowSize { - printf("window size %d > max %d\n", d.WindowSize, d.maxWindowSize) - return ErrWindowSizeExceeded - } // The minimum Window_Size is 1 KB. if d.WindowSize < MinWindowSize { - println("got window size: ", d.WindowSize) + if debugDecoder { + println("got window size: ", d.WindowSize) + } return ErrWindowSizeTooSmall } d.history.windowSize = int(d.WindowSize) - if d.o.lowMem && d.history.windowSize < maxBlockSize { - d.history.maxSize = d.history.windowSize * 2 + if !d.o.lowMem || d.history.windowSize < maxBlockSize { + // Alloc 2x window size if not low-mem, or window size below 2MB. + d.history.allocFrameBuffer = d.history.windowSize * 2 } else { - d.history.maxSize = d.history.windowSize + maxBlockSize + if d.o.lowMem { + // Alloc with 1MB extra. + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize/2 + } else { + // Alloc with 2MB extra. + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize + } } + + if debugDecoder { + println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum) + } + // history contains input - maybe we do something d.rawInput = br return nil @@ -252,243 +284,149 @@ func (d *frameDec) reset(br byteBuffer) error { // next will start decoding the next block from stream. func (d *frameDec) next(block *blockDec) error { - if debug { - printf("decoding new block %p:%p", block, block.data) + if debugDecoder { + println("decoding new block") } err := block.reset(d.rawInput, d.WindowSize) if err != nil { println("block error:", err) // Signal the frame decoder we have a problem. 
- d.sendErr(block, err) + block.sendErr(err) return err } - block.input <- struct{}{} - if debug { - println("next block:", block) - } - d.asyncRunningMu.Lock() - defer d.asyncRunningMu.Unlock() - if !d.asyncRunning { - return nil - } - if block.Last { - // We indicate the frame is done by sending io.EOF - d.decoding <- block - return io.EOF - } - d.decoding <- block return nil } -// sendEOF will queue an error block on the frame. -// This will cause the frame decoder to return when it encounters the block. -// Returns true if the decoder was added. -func (d *frameDec) sendErr(block *blockDec, err error) bool { - d.asyncRunningMu.Lock() - defer d.asyncRunningMu.Unlock() - if !d.asyncRunning { - return false - } - - println("sending error", err.Error()) - block.sendErr(err) - d.decoding <- block - return true -} - // checkCRC will check the checksum if the frame has one. // Will return ErrCRCMismatch if crc check failed, otherwise nil. func (d *frameDec) checkCRC() error { if !d.HasCheckSum { return nil } - var tmp [4]byte - got := d.crc.Sum64() - // Flip to match file order. - tmp[0] = byte(got >> 0) - tmp[1] = byte(got >> 8) - tmp[2] = byte(got >> 16) - tmp[3] = byte(got >> 24) // We can overwrite upper tmp now - want, err := d.rawInput.readSmall(4) + buf, err := d.rawInput.readSmall(4) if err != nil { println("CRC missing?", err) return err } - if !bytes.Equal(tmp[:], want) { - if debug { - println("CRC Check Failed:", tmp[:], "!=", want) + if d.o.ignoreChecksum { + return nil + } + + want := binary.LittleEndian.Uint32(buf[:4]) + got := uint32(d.crc.Sum64()) + + if got != want { + if debugDecoder { + printf("CRC check failed: got %08x, want %08x\n", got, want) } return ErrCRCMismatch } - if debug { - println("CRC ok", tmp[:]) + if debugDecoder { + printf("CRC ok %08x\n", got) } return nil } -func (d *frameDec) initAsync() { - if !d.o.lowMem && !d.SingleSegment { - // set max extra size history to 10MB. - d.history.maxSize = d.history.windowSize + maxBlockSize*5 - } - // re-alloc if more than one extra block size. - if d.o.lowMem && cap(d.history.b) > d.history.maxSize+maxBlockSize { - d.history.b = make([]byte, 0, d.history.maxSize) - } - if cap(d.history.b) < d.history.maxSize { - d.history.b = make([]byte, 0, d.history.maxSize) - } - if cap(d.decoding) < d.o.concurrent { - d.decoding = make(chan *blockDec, d.o.concurrent) - } - if debug { - h := d.history - printf("history init. len: %d, cap: %d", len(h.b), cap(h.b)) - } - d.asyncRunningMu.Lock() - d.asyncRunning = true - d.asyncRunningMu.Unlock() -} - -// startDecoder will start decoding blocks and write them to the writer. -// The decoder will stop as soon as an error occurs or at end of frame. -// When the frame has finished decoding the *bufio.Reader -// containing the remaining input will be sent on frameDec.frameDone. -func (d *frameDec) startDecoder(output chan decodeOutput) { - written := int64(0) - - defer func() { - d.asyncRunningMu.Lock() - d.asyncRunning = false - d.asyncRunningMu.Unlock() - - // Drain the currently decoding. - d.history.error = true - flushdone: - for { - select { - case b := <-d.decoding: - b.history <- &d.history - output <- <-b.result - default: - break flushdone - } - } - println("frame decoder done, signalling done") - d.frameDone.Done() - }() - // Get decoder for first block. 
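The new ignoreChecksum branch in checkCRC above skips verification while still consuming the checksum bytes. Assuming IgnoreChecksum is the matching public decoder option, a sketch (the input file name is hypothetical):

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	f, err := os.Open("frame.zst") // hypothetical input
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Assumed option name: checksum bytes are read from the stream
	// but not verified, matching the d.o.ignoreChecksum path above.
	dec, err := zstd.NewReader(f, zstd.IgnoreChecksum(true))
	if err != nil {
		log.Fatal(err)
	}
	defer dec.Close()

	if _, err := io.Copy(io.Discard, dec); err != nil {
		log.Fatal(err)
	}
}
```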
- block := <-d.decoding - block.history <- &d.history - for { - var next *blockDec - // Get result - r := <-block.result - if r.err != nil { - println("Result contained error", r.err) - output <- r - return - } - if debug { - println("got result, from ", d.offset, "to", d.offset+int64(len(r.b))) - d.offset += int64(len(r.b)) - } - if !block.Last { - // Send history to next block - select { - case next = <-d.decoding: - if debug { - println("Sending ", len(d.history.b), "bytes as history") - } - next.history <- &d.history - default: - // Wait until we have sent the block, so - // other decoders can potentially get the decoder. - next = nil - } - } - - // Add checksum, async to decoding. - if d.HasCheckSum { - n, err := d.crc.Write(r.b) - if err != nil { - r.err = err - if n != len(r.b) { - r.err = io.ErrShortWrite - } - output <- r - return - } - } - written += int64(len(r.b)) - if d.SingleSegment && uint64(written) > d.FrameContentSize { - println("runDecoder: single segment and", uint64(written), ">", d.FrameContentSize) - r.err = ErrFrameSizeExceeded - output <- r - return - } - if block.Last { - r.err = d.checkCRC() - output <- r - return - } - output <- r - if next == nil { - // There was no decoder available, we wait for one now that we have sent to the writer. - if debug { - println("Sending ", len(d.history.b), " bytes as history") - } - next = <-d.decoding - next.history <- &d.history +// consumeCRC reads the checksum data if the frame has one. +func (d *frameDec) consumeCRC() error { + if d.HasCheckSum { + _, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + return err } - block = next } + + return nil } -// runDecoder will create a sync decoder that will decode a block of data. +// runDecoder will run the decoder for the remainder of the frame. func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { saved := d.history.b // We use the history for output to avoid copying it. d.history.b = dst + d.history.ignoreBuffer = len(dst) // Store input length, so we only check new data. 
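The maxSyncLen/limitToCap bookkeeping just below caps a one-shot decode at the capacity of the destination slice. Assuming WithDecodeAllCapLimit is the matching public option, a hedged sketch:

```go
package main

import (
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	dec, err := zstd.NewReader(nil,
		zstd.WithDecodeAllCapLimit(true),  // assumed name for the limitToCap switch
		zstd.WithDecoderMaxMemory(64<<20), // overall decoded-size ceiling
	)
	if err != nil {
		log.Fatal(err)
	}
	defer dec.Close()

	compressed := []byte{}        // hypothetical zstd frame
	dst := make([]byte, 0, 1<<20) // decoded output must fit in 1 MiB
	if _, err := dec.DecodeAll(compressed, dst); err != nil {
		log.Println(err) // ErrDecoderSizeExceeded once cap(dst) would be exceeded
	}
}
```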
crcStart := len(dst) + d.history.decoders.maxSyncLen = 0 + if d.o.limitToCap { + d.history.decoders.maxSyncLen = uint64(cap(dst) - len(dst)) + } + if d.FrameContentSize != fcsUnknown { + if !d.o.limitToCap || d.FrameContentSize+uint64(len(dst)) < d.history.decoders.maxSyncLen { + d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst)) + } + if d.history.decoders.maxSyncLen > d.o.maxDecodedSize { + if debugDecoder { + println("maxSyncLen:", d.history.decoders.maxSyncLen, "> maxDecodedSize:", d.o.maxDecodedSize) + } + return dst, ErrDecoderSizeExceeded + } + if debugDecoder { + println("maxSyncLen:", d.history.decoders.maxSyncLen) + } + if !d.o.limitToCap && uint64(cap(dst)) < d.history.decoders.maxSyncLen { + // Alloc for output + dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc) + copy(dst2, dst) + dst = dst2 + } + } var err error for { err = dec.reset(d.rawInput, d.WindowSize) if err != nil { break } - if debug { + if debugDecoder { println("next block:", dec) } err = dec.decodeBuf(&d.history) - if err != nil || dec.Last { + if err != nil { + break + } + if uint64(len(d.history.b)-crcStart) > d.o.maxDecodedSize { + println("runDecoder: maxDecodedSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.o.maxDecodedSize) + err = ErrDecoderSizeExceeded break } - if uint64(len(d.history.b)) > d.o.maxDecodedSize { + if d.o.limitToCap && len(d.history.b) > cap(dst) { + println("runDecoder: cap exceeded", uint64(len(d.history.b)), ">", cap(dst)) err = ErrDecoderSizeExceeded break } - if d.SingleSegment && uint64(len(d.history.b)) > d.o.maxDecodedSize { - println("runDecoder: single segment and", uint64(len(d.history.b)), ">", d.o.maxDecodedSize) + if uint64(len(d.history.b)-crcStart) > d.FrameContentSize { + println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize) err = ErrFrameSizeExceeded break } + if dec.Last { + break + } + if debugDecoder { + println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize) + } } dst = d.history.b if err == nil { - if d.HasCheckSum { - var n int - n, err = d.crc.Write(dst[crcStart:]) - if err == nil { - if n != len(dst)-crcStart { - err = io.ErrShortWrite - } else { - err = d.checkCRC() + if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize { + err = ErrFrameSizeMismatch + } else if d.HasCheckSum { + if d.o.ignoreChecksum { + err = d.consumeCRC() + } else { + var n int + n, err = d.crc.Write(dst[crcStart:]) + if err == nil { + if n != len(dst)-crcStart { + err = io.ErrShortWrite + } else { + err = d.checkCRC() + } } } } diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go index e6d3d49b3..2f8860a72 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go @@ -5,8 +5,10 @@ package zstd import ( + "encoding/binary" "errors" "fmt" + "io" ) const ( @@ -178,10 +180,32 @@ func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error { return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<> 3) - // println(s.norm[:s.symbolLen], s.symbolLen) return s.buildDtable() } +func (s *fseDecoder) mustReadFrom(r io.Reader) { + fatalErr := func(err error) { + if err != nil { + panic(err) + } + } + // dt [maxTablesize]decSymbol // Decompression table. + // symbolLen uint16 // Length of active part of the symbol table. 
+ // actualTableLog uint8 // Selected tablelog. + // maxBits uint8 // Maximum number of additional bits + // // used for table creation to avoid allocations. + // stateTable [256]uint16 + // norm [maxSymbolValue + 1]int16 + // preDefined bool + fatalErr(binary.Read(r, binary.LittleEndian, &s.dt)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.norm)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined)) +} + // decSymbol contains information about a state entry, // Including the state offset base, the output symbol and // the number of bits to read for the low part of the destination state. @@ -204,18 +228,10 @@ func (d decSymbol) newState() uint16 { return uint16(d >> 16) } -func (d decSymbol) baseline() uint32 { - return uint32(d >> 32) -} - func (d decSymbol) baselineInt() int { return int(d >> 32) } -func (d *decSymbol) set(nbits, addBits uint8, newState uint16, baseline uint32) { - *d = decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32) -} - func (d *decSymbol) setNBits(nBits uint8) { const mask = 0xffffffffffffff00 *d = (*d & mask) | decSymbol(nBits) @@ -231,11 +247,6 @@ func (d *decSymbol) setNewState(state uint16) { *d = (*d & mask) | decSymbol(state)<<16 } -func (d *decSymbol) setBaseline(baseline uint32) { - const mask = 0xffffffff - *d = (*d & mask) | decSymbol(baseline)<<32 -} - func (d *decSymbol) setExt(addBits uint8, baseline uint32) { const mask = 0xffff00ff *d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32) @@ -257,68 +268,6 @@ func (s *fseDecoder) setRLE(symbol decSymbol) { s.dt[0] = symbol } -// buildDtable will build the decoding table. -func (s *fseDecoder) buildDtable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - symbolNext := s.stateTable[:256] - - // Init, lay down lowprob symbols - { - for i, v := range s.norm[:s.symbolLen] { - if v == -1 { - s.dt[highThreshold].setAddBits(uint8(i)) - highThreshold-- - symbolNext[i] = 1 - } else { - symbolNext[i] = uint16(v) - } - } - } - // Spread symbols - { - tableMask := tableSize - 1 - step := tableStep(tableSize) - position := uint32(0) - for ss, v := range s.norm[:s.symbolLen] { - for i := 0; i < int(v); i++ { - s.dt[position].setAddBits(uint8(ss)) - position = (position + step) & tableMask - for position > highThreshold { - // lowprob area - position = (position + step) & tableMask - } - } - } - if position != 0 { - // position must reach all cells once, otherwise normalizedCounter is incorrect - return errors.New("corrupted input (position != 0)") - } - } - - // Build Decoding table - { - tableSize := uint16(1 << s.actualTableLog) - for u, v := range s.dt[:tableSize] { - symbol := v.addBits() - nextState := symbolNext[symbol] - symbolNext[symbol] = nextState + 1 - nBits := s.actualTableLog - byte(highBits(uint32(nextState))) - s.dt[u&maxTableMask].setNBits(nBits) - newState := (nextState << nBits) - tableSize - if newState > tableSize { - return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) - } - if newState == uint16(u) && nBits == 0 { - // Seems weird that this is possible with nbits > 0. 
- return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) - } - s.dt[u&maxTableMask].setNewState(newState) - } - } - return nil -} - // transform will transform the decoder table into a table usable for // decoding without having to apply the transformation while decoding. // The state will contain the base value and the number of bits to read. @@ -352,34 +301,7 @@ func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) { s.state = dt[br.getBits(tableLog)] } -// next returns the current symbol and sets the next state. -// At least tablelog bits must be available in the bit reader. -func (s *fseState) next(br *bitReader) { - lowBits := uint16(br.getBits(s.state.nbBits())) - s.state = s.dt[s.state.newState()+lowBits] -} - -// finished returns true if all bits have been read from the bitstream -// and the next state would require reading bits from the input. -func (s *fseState) finished(br *bitReader) bool { - return br.finished() && s.state.nbBits() > 0 -} - -// final returns the current state symbol without decoding the next. -func (s *fseState) final() (int, uint8) { - return s.state.baselineInt(), s.state.addBits() -} - // final returns the current state symbol without decoding the next. func (s decSymbol) final() (int, uint8) { return s.baselineInt(), s.addBits() } - -// nextFast returns the next symbol and sets the next state. -// This can only be used if no symbols are 0 bits. -// At least tablelog bits must be available in the bit reader. -func (s *fseState) nextFast(br *bitReader) (uint32, uint8) { - lowBits := uint16(br.getBitsFast(s.state.nbBits())) - s.state = s.dt[s.state.newState()+lowBits] - return s.state.baseline(), s.state.addBits() -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go new file mode 100644 index 000000000..d04a829b0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go @@ -0,0 +1,65 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" +) + +type buildDtableAsmContext struct { + // inputs + stateTable *uint16 + norm *int16 + dt *uint64 + + // outputs --- set by the procedure in the case of error; + // for interpretation please see the error handling part below + errParam1 uint64 + errParam2 uint64 +} + +// buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable. +// Function returns non-zero exit code on error. +// +//go:noescape +func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int + +// please keep in sync with _generate/gen_fse.go +const ( + errorCorruptedNormalizedCounter = 1 + errorNewStateTooBig = 2 + errorNewStateNoBits = 3 +) + +// buildDtable will build the decoding table. 
+func (s *fseDecoder) buildDtable() error { + ctx := buildDtableAsmContext{ + stateTable: &s.stateTable[0], + norm: &s.norm[0], + dt: (*uint64)(&s.dt[0]), + } + code := buildDtable_asm(s, &ctx) + + if code != 0 { + switch code { + case errorCorruptedNormalizedCounter: + position := ctx.errParam1 + return fmt.Errorf("corrupted input (position=%d, expected 0)", position) + + case errorNewStateTooBig: + newState := decSymbol(ctx.errParam1) + size := ctx.errParam2 + return fmt.Errorf("newState (%d) outside table size (%d)", newState, size) + + case errorNewStateNoBits: + newState := decSymbol(ctx.errParam1) + oldState := decSymbol(ctx.errParam2) + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, oldState) + + default: + return fmt.Errorf("buildDtable_asm returned unhandled nonzero code = %d", code) + } + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s new file mode 100644 index 000000000..bcde39869 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s @@ -0,0 +1,126 @@ +// Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm + +// func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int +TEXT ·buildDtable_asm(SB), $0-24 + MOVQ ctx+8(FP), CX + MOVQ s+0(FP), DI + + // Load values + MOVBQZX 4098(DI), DX + XORQ AX, AX + BTSQ DX, AX + MOVQ (CX), BX + MOVQ 16(CX), SI + LEAQ -1(AX), R8 + MOVQ 8(CX), CX + MOVWQZX 4096(DI), DI + + // End load values + // Init, lay down lowprob symbols + XORQ R9, R9 + JMP init_main_loop_condition + +init_main_loop: + MOVWQSX (CX)(R9*2), R10 + CMPW R10, $-1 + JNE do_not_update_high_threshold + MOVB R9, 1(SI)(R8*8) + DECQ R8 + MOVQ $0x0000000000000001, R10 + +do_not_update_high_threshold: + MOVW R10, (BX)(R9*2) + INCQ R9 + +init_main_loop_condition: + CMPQ R9, DI + JL init_main_loop + + // Spread symbols + // Calculate table step + MOVQ AX, R9 + SHRQ $0x01, R9 + MOVQ AX, R10 + SHRQ $0x03, R10 + LEAQ 3(R9)(R10*1), R9 + + // Fill add bits values + LEAQ -1(AX), R10 + XORQ R11, R11 + XORQ R12, R12 + JMP spread_main_loop_condition + +spread_main_loop: + XORQ R13, R13 + MOVWQSX (CX)(R12*2), R14 + JMP spread_inner_loop_condition + +spread_inner_loop: + MOVB R12, 1(SI)(R11*8) + +adjust_position: + ADDQ R9, R11 + ANDQ R10, R11 + CMPQ R11, R8 + JG adjust_position + INCQ R13 + +spread_inner_loop_condition: + CMPQ R13, R14 + JL spread_inner_loop + INCQ R12 + +spread_main_loop_condition: + CMPQ R12, DI + JL spread_main_loop + TESTQ R11, R11 + JZ spread_check_ok + MOVQ ctx+8(FP), AX + MOVQ R11, 24(AX) + MOVQ $+1, ret+16(FP) + RET + +spread_check_ok: + // Build Decoding table + XORQ DI, DI + +build_table_main_table: + MOVBQZX 1(SI)(DI*8), CX + MOVWQZX (BX)(CX*2), R8 + LEAQ 1(R8), R9 + MOVW R9, (BX)(CX*2) + MOVQ R8, R9 + BSRQ R9, R9 + MOVQ DX, CX + SUBQ R9, CX + SHLQ CL, R8 + SUBQ AX, R8 + MOVB CL, (SI)(DI*8) + MOVW R8, 2(SI)(DI*8) + CMPQ R8, AX + JLE build_table_check1_ok + MOVQ ctx+8(FP), CX + MOVQ R8, 24(CX) + MOVQ AX, 32(CX) + MOVQ $+2, ret+16(FP) + RET + +build_table_check1_ok: + TESTB CL, CL + JNZ build_table_check2_ok + CMPW R8, DI + JNE build_table_check2_ok + MOVQ ctx+8(FP), AX + MOVQ R8, 24(AX) + MOVQ DI, 32(AX) + MOVQ $+3, ret+16(FP) + RET + +build_table_check2_ok: + INCQ DI + CMPQ DI, AX + JL build_table_main_table + MOVQ $+0, ret+16(FP) + RET diff --git 
a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go new file mode 100644 index 000000000..332e51fe4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go @@ -0,0 +1,72 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "errors" + "fmt" +) + +// buildDtable will build the decoding table. +func (s *fseDecoder) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + symbolNext := s.stateTable[:256] + + // Init, lay down lowprob symbols + { + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.dt[highThreshold].setAddBits(uint8(i)) + highThreshold-- + symbolNext[i] = 1 + } else { + symbolNext[i] = uint16(v) + } + } + } + + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.dt[position].setAddBits(uint8(ss)) + position = (position + step) & tableMask + for position > highThreshold { + // lowprob area + position = (position + step) & tableMask + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.dt[:tableSize] { + symbol := v.addBits() + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.dt[u&maxTableMask].setNBits(nBits) + newState := (nextState << nBits) - tableSize + if newState > tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.dt[u&maxTableMask].setNewState(newState) + } + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go index c74681b99..ab26326a8 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go @@ -62,9 +62,8 @@ func (s symbolTransform) String() string { // To indicate that you have populated the histogram call HistogramFinished // with the value of the highest populated symbol, as well as the number of entries // in the most populated entry. These are accepted at face value. -// The returned slice will always be length 256. -func (s *fseEncoder) Histogram() []uint32 { - return s.count[:] +func (s *fseEncoder) Histogram() *[256]uint32 { + return &s.count } // HistogramFinished can be called to indicate that the histogram has been populated. @@ -77,21 +76,6 @@ func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) { s.clearCount = maxCount != 0 } -// prepare will prepare and allocate scratch tables used for both compression and decompression. -func (s *fseEncoder) prepare() (*fseEncoder, error) { - if s == nil { - s = &fseEncoder{} - } - s.useRLE = false - if s.clearCount && s.maxCount == 0 { - for i := range s.count { - s.count[i] = 0 - } - s.clearCount = false - } - return s, nil -} - // allocCtable will allocate tables needed for compression. 
// If existing tables are big enough, they are simply re-used. func (s *fseEncoder) allocCtable() { @@ -229,7 +213,7 @@ func (s *fseEncoder) setRLE(val byte) { deltaFindState: 0, deltaNbBits: 0, } - if debug { + if debugEncoder { println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val]) } s.rleVal = val @@ -710,14 +694,6 @@ func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) { c.state = c.stateTable[lu] } -// encode the output symbol provided and write it to the bitstream. -func (c *cState) encode(symbolTT symbolTransform) { - nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 - dstState := int32(c.state>>(nbBitsOut&15)) + int32(symbolTT.deltaFindState) - c.bw.addBits16NC(c.state, uint8(nbBitsOut)) - c.state = c.stateTable[dstState] -} - // flush will write the tablelog to the output and flush the remaining full bytes. func (c *cState) flush(tableLog uint8) { c.bw.flush32() diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go index 4a752067f..5d73c21eb 100644 --- a/vendor/github.com/klauspost/compress/zstd/hash.go +++ b/vendor/github.com/klauspost/compress/zstd/hash.go @@ -13,65 +13,23 @@ const ( prime8bytes = 0xcf1bbcdcb7a56463 ) -// hashLen returns a hash of the lowest l bytes of u for a size size of h bytes. -// l must be >=4 and <=8. Any other value will return hash for 4 bytes. -// h should always be <32. -// Preferably h and l should be a constant. -// FIXME: This does NOT get resolved, if 'mls' is constant, -// so this cannot be used. -func hashLen(u uint64, hashLog, mls uint8) uint32 { +// hashLen returns a hash of the lowest mls bytes of u, with length output bits. +// mls must be >=3 and <=8. Any other value will return hash for 4 bytes. +// length should always be < 32. +// Preferably length and mls should be a constant for inlining. +func hashLen(u uint64, length, mls uint8) uint32 { switch mls { + case 3: + return (uint32(u<<8) * prime3bytes) >> (32 - length) case 5: - return hash5(u, hashLog) + return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length)) case 6: - return hash6(u, hashLog) + return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length)) case 7: - return hash7(u, hashLog) + return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length)) case 8: - return hash8(u, hashLog) + return uint32((u * prime8bytes) >> (64 - length)) default: - return hash4x64(u, hashLog) + return (uint32(u) * prime4bytes) >> (32 - length) } } - -// hash3 returns the hash of the lower 3 bytes of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <32. -func hash3(u uint32, h uint8) uint32 { - return ((u << (32 - 24)) * prime3bytes) >> ((32 - h) & 31) -} - -// hash4 returns the hash of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <32. -func hash4(u uint32, h uint8) uint32 { - return (u * prime4bytes) >> ((32 - h) & 31) -} - -// hash4x64 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <32. -func hash4x64(u uint64, h uint8) uint32 { - return (uint32(u) * prime4bytes) >> ((32 - h) & 31) -} - -// hash5 returns the hash of the lowest 5 bytes of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <64.
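Since the rewritten hashLen above relies on multiplicative hashing, here is a standalone sketch (prime constants mirrored from hash.go; the helper names are ours) demonstrating the doc's claim that the result always fits in `length` bits:

```go
package main

import "fmt"

// Constants mirrored from zstd/hash.go.
const (
	prime4bytes = 2654435761
	prime8bytes = 0xcf1bbcdcb7a56463
)

// hash4 mirrors hashLen's default (4-byte) case.
func hash4(u uint64, length uint8) uint32 {
	return (uint32(u) * prime4bytes) >> (32 - length)
}

// hash8 mirrors the mls==8 case.
func hash8(u uint64, length uint8) uint32 {
	return uint32((u * prime8bytes) >> (64 - length))
}

func main() {
	const tableBits = 16 // a hash table with 1<<16 entries
	for _, v := range []uint64{0, 0xdeadbeef, 0xcafef00dd15ea5e} {
		h4, h8 := hash4(v, tableBits), hash8(v, tableBits)
		// Both always fit in tableBits bits, so they are valid table indexes.
		fmt.Println(h4 < 1<<tableBits, h8 < 1<<tableBits)
	}
}
```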
-func hash5(u uint64, h uint8) uint32 { - return uint32(((u << (64 - 40)) * prime5bytes) >> ((64 - h) & 63)) -} - -// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <64. -func hash6(u uint64, h uint8) uint32 { - return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63)) -} - -// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <64. -func hash7(u uint64, h uint8) uint32 { - return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63)) -} - -// hash8 returns the hash of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <64. -func hash8(u uint64, h uint8) uint32 { - return uint32((u * prime8bytes) >> ((64 - h) & 63)) -} diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go index f783e32d2..09164856d 100644 --- a/vendor/github.com/klauspost/compress/zstd/history.go +++ b/vendor/github.com/klauspost/compress/zstd/history.go @@ -10,40 +10,48 @@ import ( // history contains the information transferred between blocks. type history struct { - b []byte - huffTree *huff0.Scratch - recentOffsets [3]int + // Literal decompression + huffTree *huff0.Scratch + + // Sequence decompression decoders sequenceDecs - windowSize int - maxSize int - error bool - dict *dict + recentOffsets [3]int + + // History buffer... + b []byte + + // ignoreBuffer is meant to ignore a number of bytes + // when checking for matches in history + ignoreBuffer int + + windowSize int + allocFrameBuffer int // needed? + error bool + dict *dict } // reset will reset the history to initial state of a frame. // The history must already have been initialized to the desired size. func (h *history) reset() { h.b = h.b[:0] + h.ignoreBuffer = 0 h.error = false h.recentOffsets = [3]int{1, 4, 8} - if f := h.decoders.litLengths.fse; f != nil && !f.preDefined { - fseDecoderPool.Put(f) - } - if f := h.decoders.offsets.fse; f != nil && !f.preDefined { - fseDecoderPool.Put(f) - } - if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined { - fseDecoderPool.Put(f) - } - h.decoders = sequenceDecs{} + h.decoders.freeDecoders() + h.decoders = sequenceDecs{br: h.decoders.br} + h.freeHuffDecoder() + h.huffTree = nil + h.dict = nil + //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b)) +} + +func (h *history) freeHuffDecoder() { if h.huffTree != nil { if h.dict == nil || h.dict.litEnc != h.huffTree { huffDecoderPool.Put(h.huffTree) + h.huffTree = nil } } - h.huffTree = nil - h.dict = nil - //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b)) } func (h *history) setDict(dict *dict) { @@ -54,6 +62,7 @@ func (h *history) setDict(dict *dict) { h.decoders.litLengths = dict.llDec h.decoders.offsets = dict.ofDec h.decoders.matchLengths = dict.mlDec + h.decoders.dict = dict.content h.recentOffsets = dict.offsets h.huffTree = dict.litEnc } @@ -83,6 +92,24 @@ func (h *history) append(b []byte) { copy(h.b[h.windowSize-len(b):], b) } +// ensureBlock will ensure there is space for at least one block... +func (h *history) ensureBlock() { + if cap(h.b) < h.allocFrameBuffer { + h.b = make([]byte, 0, h.allocFrameBuffer) + return + } + + avail := cap(h.b) - len(h.b) + if avail >= h.windowSize || avail > maxCompressedBlockSize { + return + } + // Move data down so we only have window size left. 
+ // We know we have less than window size in b at this point. + discard := len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] +} + // append bytes to history without ever discarding anything. func (h *history) appendKeep(b []byte) { h.b = append(h.b, b...) diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md index 69aa3bb58..777290d44 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md @@ -2,12 +2,7 @@ VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package. - -[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) -[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash) - -xxhash is a Go implementation of the 64-bit -[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a +xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a high-quality hashing algorithm that is much faster than anything in the Go standard library. @@ -28,31 +23,49 @@ func (*Digest) WriteString(string) (int, error) func (*Digest) Sum64() uint64 ``` -This implementation provides a fast pure-Go implementation and an even faster -assembly implementation for amd64. +The package is written with optimized pure Go and also contains even faster +assembly implementations for amd64 and arm64. If desired, the `purego` build tag +opts into using the Go code even on those architectures. + +[xxHash]: http://cyan4973.github.io/xxHash/ + +## Compatibility + +This package is in a module and the latest code is in version 2 of the module. +You need a version of Go with at least "minimal module compatibility" to use +github.com/cespare/xxhash/v2: + +* 1.9.7+ for Go 1.9 +* 1.10.3+ for Go 1.10 +* Go 1.11 or later + +I recommend using the latest release of Go. ## Benchmarks Here are some quick benchmarks comparing the pure-Go and assembly implementations of Sum64. 
-| input size | purego | asm | -| --- | --- | --- | -| 5 B | 979.66 MB/s | 1291.17 MB/s | -| 100 B | 7475.26 MB/s | 7973.40 MB/s | -| 4 KB | 17573.46 MB/s | 17602.65 MB/s | -| 10 MB | 17131.46 MB/s | 17142.16 MB/s | +| input size | purego | asm | +| ---------- | --------- | --------- | +| 4 B | 1.3 GB/s | 1.2 GB/s | +| 16 B | 2.9 GB/s | 3.5 GB/s | +| 100 B | 6.9 GB/s | 8.1 GB/s | +| 4 KB | 11.7 GB/s | 16.7 GB/s | +| 10 MB | 12.0 GB/s | 17.3 GB/s | -These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using -the following commands under Go 1.11.2: +These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C +CPU using the following commands under Go 1.19.2: ``` -$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' -$ go test -benchtime 10s -bench '/xxhash,direct,bytes' +benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') +benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') ``` ## Projects using this package - [InfluxDB](https://github.com/influxdata/influxdb) - [Prometheus](https://github.com/prometheus/prometheus) +- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) - [FreeCache](https://github.com/coocood/freecache) +- [FastCache](https://github.com/VictoriaMetrics/fastcache) diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go index 426b9cac7..fc40c8200 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go @@ -18,19 +18,11 @@ const ( prime5 uint64 = 2870177450012600261 ) -// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where -// possible in the Go code is worth a small (but measurable) performance boost -// by avoiding some MOVQs. Vars are needed for the asm and also are useful for -// convenience in the Go code in a few places where we need to intentionally -// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the -// result overflows a uint64). -var ( - prime1v = prime1 - prime2v = prime2 - prime3v = prime3 - prime4v = prime4 - prime5v = prime5 -) +// Store the primes in an array as well. +// +// The consts are used when possible in Go code to avoid MOVs but we need a +// contiguous array of the assembly code. +var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} // Digest implements hash.Hash64. type Digest struct { @@ -52,10 +44,10 @@ func New() *Digest { // Reset clears the Digest's state so that it can be reused. func (d *Digest) Reset() { - d.v1 = prime1v + prime2 + d.v1 = primes[0] + prime2 d.v2 = prime2 d.v3 = 0 - d.v4 = -prime1v + d.v4 = -primes[0] d.total = 0 d.n = 0 } @@ -71,21 +63,23 @@ func (d *Digest) Write(b []byte) (n int, err error) { n = len(b) d.total += uint64(n) + memleft := d.mem[d.n&(len(d.mem)-1):] + if d.n+n < 32 { // This new data doesn't even fill the current block. - copy(d.mem[d.n:], b) + copy(memleft, b) d.n += n return } if d.n > 0 { // Finish off the partial block. 
- copy(d.mem[d.n:], b) + c := copy(memleft, b) d.v1 = round(d.v1, u64(d.mem[0:8])) d.v2 = round(d.v2, u64(d.mem[8:16])) d.v3 = round(d.v3, u64(d.mem[16:24])) d.v4 = round(d.v4, u64(d.mem[24:32])) - b = b[32-d.n:] + b = b[c:] d.n = 0 } @@ -135,21 +129,20 @@ func (d *Digest) Sum64() uint64 { h += d.total - i, end := 0, d.n - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(d.mem[i:i+8])) + b := d.mem[:d.n&(len(d.mem)-1)] + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) h ^= k1 h = rol27(h)*prime1 + prime4 } - if i+4 <= end { - h ^= uint64(u32(d.mem[i:i+4])) * prime1 + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 h = rol23(h)*prime2 + prime3 - i += 4 + b = b[4:] } - for i < end { - h ^= uint64(d.mem[i]) * prime5 + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 h = rol11(h) * prime1 - i++ } h ^= h >> 33 @@ -195,7 +188,6 @@ func (d *Digest) UnmarshalBinary(b []byte) error { b, d.v4 = consumeUint64(b) b, d.total = consumeUint64(b) copy(d.mem[:], b) - b = b[len(d.mem):] d.n = int(d.total % uint64(len(d.mem))) return nil } diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s index 2c9c5357a..ddb63aa91 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s @@ -1,215 +1,210 @@ +//go:build !appengine && gc && !purego && !noasm // +build !appengine // +build gc // +build !purego +// +build !noasm #include "textflag.h" -// Register allocation: -// AX h -// CX pointer to advance through b -// DX n -// BX loop end -// R8 v1, k1 -// R9 v2 -// R10 v3 -// R11 v4 -// R12 tmp -// R13 prime1v -// R14 prime2v -// R15 prime4v - -// round reads from and advances the buffer pointer in CX. -// It assumes that R13 has prime1v and R14 has prime2v. -#define round(r) \ - MOVQ (CX), R12 \ - ADDQ $8, CX \ - IMULQ R14, R12 \ - ADDQ R12, r \ - ROLQ $31, r \ - IMULQ R13, r - -// mergeRound applies a merge round on the two registers acc and val. -// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v. -#define mergeRound(acc, val) \ - IMULQ R14, val \ - ROLQ $31, val \ - IMULQ R13, val \ - XORQ val, acc \ - IMULQ R13, acc \ - ADDQ R15, acc +// Registers: +#define h AX +#define d AX +#define p SI // pointer to advance through b +#define n DX +#define end BX // loop end +#define v1 R8 +#define v2 R9 +#define v3 R10 +#define v4 R11 +#define x R12 +#define prime1 R13 +#define prime2 R14 +#define prime4 DI + +#define round(acc, x) \ + IMULQ prime2, x \ + ADDQ x, acc \ + ROLQ $31, acc \ + IMULQ prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + IMULQ prime2, x \ + ROLQ $31, x \ + IMULQ prime1, x + +// mergeRound applies a merge round on the two registers acc and x. +// It assumes that prime1, prime2, and prime4 have been loaded. +#define mergeRound(acc, x) \ + round0(x) \ + XORQ x, acc \ + IMULQ prime1, acc \ + ADDQ prime4, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that there is at least one block +// to process. +#define blockLoop() \ +loop: \ + MOVQ +0(p), x \ + round(v1, x) \ + MOVQ +8(p), x \ + round(v2, x) \ + MOVQ +16(p), x \ + round(v3, x) \ + MOVQ +24(p), x \ + round(v4, x) \ + ADDQ $32, p \ + CMPQ p, end \ + JLE loop // func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT, $0-32 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 // Load fixed primes. 
- MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - MOVQ ·prime4v(SB), R15 + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + MOVQ ·primes+24(SB), prime4 // Load slice. - MOVQ b_base+0(FP), CX - MOVQ b_len+8(FP), DX - LEAQ (CX)(DX*1), BX + MOVQ b_base+0(FP), p + MOVQ b_len+8(FP), n + LEAQ (p)(n*1), end // The first loop limit will be len(b)-32. - SUBQ $32, BX + SUBQ $32, end // Check whether we have at least one block. - CMPQ DX, $32 + CMPQ n, $32 JLT noBlocks // Set up initial state (v1, v2, v3, v4). - MOVQ R13, R8 - ADDQ R14, R8 - MOVQ R14, R9 - XORQ R10, R10 - XORQ R11, R11 - SUBQ R13, R11 - - // Loop until CX > BX. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ CX, BX - JLE blockLoop - - MOVQ R8, AX - ROLQ $1, AX - MOVQ R9, R12 - ROLQ $7, R12 - ADDQ R12, AX - MOVQ R10, R12 - ROLQ $12, R12 - ADDQ R12, AX - MOVQ R11, R12 - ROLQ $18, R12 - ADDQ R12, AX - - mergeRound(AX, R8) - mergeRound(AX, R9) - mergeRound(AX, R10) - mergeRound(AX, R11) + MOVQ prime1, v1 + ADDQ prime2, v1 + MOVQ prime2, v2 + XORQ v3, v3 + XORQ v4, v4 + SUBQ prime1, v4 + + blockLoop() + + MOVQ v1, h + ROLQ $1, h + MOVQ v2, x + ROLQ $7, x + ADDQ x, h + MOVQ v3, x + ROLQ $12, x + ADDQ x, h + MOVQ v4, x + ROLQ $18, x + ADDQ x, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) JMP afterBlocks noBlocks: - MOVQ ·prime5v(SB), AX + MOVQ ·primes+32(SB), h afterBlocks: - ADDQ DX, AX - - // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8. - ADDQ $24, BX - - CMPQ CX, BX - JG fourByte - -wordLoop: - // Calculate k1. - MOVQ (CX), R8 - ADDQ $8, CX - IMULQ R14, R8 - ROLQ $31, R8 - IMULQ R13, R8 - - XORQ R8, AX - ROLQ $27, AX - IMULQ R13, AX - ADDQ R15, AX - - CMPQ CX, BX - JLE wordLoop - -fourByte: - ADDQ $4, BX - CMPQ CX, BX - JG singles - - MOVL (CX), R8 - ADDQ $4, CX - IMULQ R13, R8 - XORQ R8, AX - - ROLQ $23, AX - IMULQ R14, AX - ADDQ ·prime3v(SB), AX - -singles: - ADDQ $4, BX - CMPQ CX, BX + ADDQ n, h + + ADDQ $24, end + CMPQ p, end + JG try4 + +loop8: + MOVQ (p), x + ADDQ $8, p + round0(x) + XORQ x, h + ROLQ $27, h + IMULQ prime1, h + ADDQ prime4, h + + CMPQ p, end + JLE loop8 + +try4: + ADDQ $4, end + CMPQ p, end + JG try1 + + MOVL (p), x + ADDQ $4, p + IMULQ prime1, x + XORQ x, h + + ROLQ $23, h + IMULQ prime2, h + ADDQ ·primes+16(SB), h + +try1: + ADDQ $4, end + CMPQ p, end JGE finalize -singlesLoop: - MOVBQZX (CX), R12 - ADDQ $1, CX - IMULQ ·prime5v(SB), R12 - XORQ R12, AX +loop1: + MOVBQZX (p), x + ADDQ $1, p + IMULQ ·primes+32(SB), x + XORQ x, h + ROLQ $11, h + IMULQ prime1, h - ROLQ $11, AX - IMULQ R13, AX - - CMPQ CX, BX - JL singlesLoop + CMPQ p, end + JL loop1 finalize: - MOVQ AX, R12 - SHRQ $33, R12 - XORQ R12, AX - IMULQ R14, AX - MOVQ AX, R12 - SHRQ $29, R12 - XORQ R12, AX - IMULQ ·prime3v(SB), AX - MOVQ AX, R12 - SHRQ $32, R12 - XORQ R12, AX - - MOVQ AX, ret+24(FP) + MOVQ h, x + SHRQ $33, x + XORQ x, h + IMULQ prime2, h + MOVQ h, x + SHRQ $29, x + XORQ x, h + IMULQ ·primes+16(SB), h + MOVQ h, x + SHRQ $32, x + XORQ x, h + + MOVQ h, ret+24(FP) RET -// writeBlocks uses the same registers as above except that it uses AX to store -// the d pointer. - // func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT, $0-40 +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 // Load fixed primes needed for round. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 // Load slice. 
- MOVQ arg1_base+8(FP), CX - MOVQ arg1_len+16(FP), DX - LEAQ (CX)(DX*1), BX - SUBQ $32, BX + MOVQ b_base+8(FP), p + MOVQ b_len+16(FP), n + LEAQ (p)(n*1), end + SUBQ $32, end // Load vN from d. - MOVQ arg+0(FP), AX - MOVQ 0(AX), R8 // v1 - MOVQ 8(AX), R9 // v2 - MOVQ 16(AX), R10 // v3 - MOVQ 24(AX), R11 // v4 + MOVQ s+0(FP), d + MOVQ 0(d), v1 + MOVQ 8(d), v2 + MOVQ 16(d), v3 + MOVQ 24(d), v4 // We don't need to check the loop condition here; this function is // always called with at least one block of data to process. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ CX, BX - JLE blockLoop + blockLoop() // Copy vN back to d. - MOVQ R8, 0(AX) - MOVQ R9, 8(AX) - MOVQ R10, 16(AX) - MOVQ R11, 24(AX) - - // The number of bytes written is CX minus the old base pointer. - SUBQ arg1_base+8(FP), CX - MOVQ CX, ret+32(FP) + MOVQ v1, 0(d) + MOVQ v2, 8(d) + MOVQ v3, 16(d) + MOVQ v4, 24(d) + + // The number of bytes written is p minus the old base pointer. + SUBQ b_base+8(FP), p + MOVQ p, ret+32(FP) RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s new file mode 100644 index 000000000..17901e080 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s @@ -0,0 +1,184 @@ +//go:build !appengine && gc && !purego && !noasm +// +build !appengine +// +build gc +// +build !purego +// +build !noasm + +#include "textflag.h" + +// Registers: +#define digest R1 +#define h R2 // return value +#define p R3 // input pointer +#define n R4 // input length +#define nblocks R5 // n / 32 +#define prime1 R7 +#define prime2 R8 +#define prime3 R9 +#define prime4 R10 +#define prime5 R11 +#define v1 R12 +#define v2 R13 +#define v3 R14 +#define v4 R15 +#define x1 R20 +#define x2 R21 +#define x3 R22 +#define x4 R23 + +#define round(acc, x) \ + MADD prime2, acc, x, acc \ + ROR $64-31, acc \ + MUL prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + MUL prime2, x \ + ROR $64-31, x \ + MUL prime1, x + +#define mergeRound(acc, x) \ + round0(x) \ + EOR x, acc \ + MADD acc, prime4, prime1, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that n >= 32. +#define blockLoop() \ + LSR $5, n, nblocks \ + PCALIGN $16 \ + loop: \ + LDP.P 16(p), (x1, x2) \ + LDP.P 16(p), (x3, x4) \ + round(v1, x1) \ + round(v2, x2) \ + round(v3, x3) \ + round(v4, x4) \ + SUB $1, nblocks \ + CBNZ nblocks, loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + LDP b_base+0(FP), (p, n) + + LDP ·primes+0(SB), (prime1, prime2) + LDP ·primes+16(SB), (prime3, prime4) + MOVD ·primes+32(SB), prime5 + + CMP $32, n + CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } + BLT afterLoop + + ADD prime1, prime2, v1 + MOVD prime2, v2 + MOVD $0, v3 + NEG prime1, v4 + + blockLoop() + + ROR $64-1, v1, x1 + ROR $64-7, v2, x2 + ADD x1, x2 + ROR $64-12, v3, x3 + ROR $64-18, v4, x4 + ADD x3, x4 + ADD x2, x4, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + +afterLoop: + ADD n, h + + TBZ $4, n, try8 + LDP.P 16(p), (x1, x2) + + round0(x1) + + // NOTE: here and below, sequencing the EOR after the ROR (using a + // rotated register) is worth a small but measurable speedup for small + // inputs. 
+ ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + + round0(x2) + ROR $64-27, h + EOR x2 @> 64-27, h, h + MADD h, prime4, prime1, h + +try8: + TBZ $3, n, try4 + MOVD.P 8(p), x1 + + round0(x1) + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + +try4: + TBZ $2, n, try2 + MOVWU.P 4(p), x2 + + MUL prime1, x2 + ROR $64-23, h + EOR x2 @> 64-23, h, h + MADD h, prime3, prime2, h + +try2: + TBZ $1, n, try1 + MOVHU.P 2(p), x3 + AND $255, x3, x1 + LSR $8, x3, x2 + + MUL prime5, x1 + ROR $64-11, h + EOR x1 @> 64-11, h, h + MUL prime1, h + + MUL prime5, x2 + ROR $64-11, h + EOR x2 @> 64-11, h, h + MUL prime1, h + +try1: + TBZ $0, n, finalize + MOVBU (p), x4 + + MUL prime5, x4 + ROR $64-11, h + EOR x4 @> 64-11, h, h + MUL prime1, h + +finalize: + EOR h >> 33, h + MUL prime2, h + EOR h >> 29, h + MUL prime3, h + EOR h >> 32, h + + MOVD h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + LDP ·primes+0(SB), (prime1, prime2) + + // Load state. Assume v[1-4] are stored contiguously. + MOVD d+0(FP), digest + LDP 0(digest), (v1, v2) + LDP 16(digest), (v3, v4) + + LDP b_base+8(FP), (p, n) + + blockLoop() + + // Store updated state. + STP (v1, v2), 0(digest) + STP (v3, v4), 16(digest) + + BIC $31, n + MOVD n, ret+32(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go similarity index 54% rename from vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go rename to vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go index 35318d7c4..d4221edf4 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go @@ -1,6 +1,9 @@ +//go:build (amd64 || arm64) && !appengine && gc && !purego && !noasm +// +build amd64 arm64 // +build !appengine // +build gc // +build !purego +// +build !noasm package xxhash @@ -10,4 +13,4 @@ package xxhash func Sum64(b []byte) uint64 //go:noescape -func writeBlocks(*Digest, []byte) int +func writeBlocks(s *Digest, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go index 4a5a82160..0be16cefc 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go @@ -1,4 +1,5 @@ -// +build !amd64 appengine !gc purego +//go:build (!amd64 && !arm64) || appengine || !gc || purego || noasm +// +build !amd64,!arm64 appengine !gc purego noasm package xxhash @@ -14,10 +15,10 @@ func Sum64(b []byte) uint64 { var h uint64 if n >= 32 { - v1 := prime1v + prime2 + v1 := primes[0] + prime2 v2 := prime2 v3 := uint64(0) - v4 := -prime1v + v4 := -primes[0] for len(b) >= 32 { v1 = round(v1, u64(b[0:8:len(b)])) v2 = round(v2, u64(b[8:16:len(b)])) @@ -36,19 +37,18 @@ func Sum64(b []byte) uint64 { h += uint64(n) - i, end := 0, len(b) - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(b[i:i+8:len(b)])) + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) h ^= k1 h = rol27(h)*prime1 + prime4 } - if i+4 <= end { - h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 h = rol23(h)*prime2 + prime3 - i += 4 + b = b[4:] } - for ; i < end; i++ { - h ^= uint64(b[i]) * prime5 + for ; len(b) > 0; b = 
b[1:] { + h ^= uint64(b[0]) * prime5 h = rol11(h) * prime1 } diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go index 1dd39e63b..f833d1541 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -20,6 +20,10 @@ type seq struct { llCode, mlCode, ofCode uint8 } +type seqVals struct { + ll, ml, mo int +} + func (s seq) String() string { if s.offset <= 3 { if s.offset == 0 { @@ -61,16 +65,19 @@ type sequenceDecs struct { offsets sequenceDec matchLengths sequenceDec prevOffset [3]int - hist []byte dict []byte literals []byte out []byte + nSeqs int + br *bitReader + seqSize int windowSize int maxBits uint8 + maxSyncLen uint64 } // initialize all 3 decoders from the stream input. -func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []byte) error { +func (s *sequenceDecs) initialize(br *bitReader, hist *history, out []byte) error { if err := s.litLengths.init(br); err != nil { return errors.New("litLengths:" + err.Error()) } @@ -80,8 +87,7 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out [] if err := s.matchLengths.init(br); err != nil { return errors.New("matchLengths:" + err.Error()) } - s.literals = literals - s.hist = hist.b + s.br = br s.prevOffset = hist.recentOffsets s.maxBits = s.litLengths.fse.maxBits + s.offsets.fse.maxBits + s.matchLengths.fse.maxBits s.windowSize = hist.windowSize @@ -93,12 +99,142 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out [] return nil } +func (s *sequenceDecs) freeDecoders() { + if f := s.litLengths.fse; f != nil && !f.preDefined { + fseDecoderPool.Put(f) + s.litLengths.fse = nil + } + if f := s.offsets.fse; f != nil && !f.preDefined { + fseDecoderPool.Put(f) + s.offsets.fse = nil + } + if f := s.matchLengths.fse; f != nil && !f.preDefined { + fseDecoderPool.Put(f) + s.matchLengths.fse = nil + } +} + +// execute will execute the decoded sequence with the provided history. +// The sequence must be evaluated before being sent. +func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error { + if len(s.dict) == 0 { + return s.executeSimple(seqs, hist) + } + + // Ensure we have enough output size... + if len(s.out)+s.seqSize > cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with hist %d, dict %d, literals: %d into %d bytes\n", len(seqs), len(hist), len(s.dict), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Copy from dictionary... + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + if len(s.dict) == 0 { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // we may be in dictionary. + dictO := len(s.dict) - (seq.mo - (t + len(hist))) + if dictO < 0 || dictO >= len(s.dict) { + return fmt.Errorf("match offset (%d) bigger than current history+dict (%d)", seq.mo, t+len(hist)+len(s.dict)) + } + end := dictO + seq.ml + if end > len(s.dict) { + n := len(s.dict) - dictO + copy(out[t:], s.dict[dictO:]) + t += n + seq.ml -= n + } else { + copy(out[t:], s.dict[dictO:end]) + t += end - dictO + continue + } + } + + // Copy from history. 
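+		// A single match may be satisfied from up to three places, tried in
+		// order: the dictionary (above), the previous blocks' history (here),
+		// and the output produced so far in this block (below).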
+ if v := seq.mo - t; v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if seq.ml > v { + // Some goes into current block. + // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + // We must be in current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + continue + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added. + for i := range src { + dst[i] = src[i] + } + } + } + } + + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} + // decode sequences from the stream with the provided history. -func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { +func (s *sequenceDecs) decodeSync(hist []byte) error { + supported, err := s.decodeSyncSimple(hist) + if supported { + return err + } + + br := s.br + seqs := s.nSeqs startSize := len(s.out) // Grab full sizes tables, to avoid bounds checks. llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + out := s.out + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } for i := seqs - 1; i >= 0; i-- { if br.overread() { @@ -151,7 +287,7 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { if temp == 0 { // 0 is not valid; input is corrupted; force offset to 1 - println("temp was 0") + println("WARNING: temp was 0") temp = 1 } @@ -176,51 +312,52 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { if ll > len(s.literals) { return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals)) } - size := ll + ml + len(s.out) + size := ll + ml + len(out) if size-startSize > maxBlockSize { - return fmt.Errorf("output (%d) bigger than max block size", size) + if size-startSize == 424242 { + panic("here") + } + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) } - if size > cap(s.out) { + if size > cap(out) { // Not enough size, which can happen under high volume block streaming conditions // but could be if destination slice is too small for sync operations. // over-allocating here can create a large amount of GC pressure so we try to keep // it as contained as possible - used := len(s.out) - startSize + used := len(out) - startSize addBytes := 256 + ll + ml + used>>2 // Clamp to max block size. if used+addBytes > maxBlockSize { addBytes = maxBlockSize - used } - s.out = append(s.out, make([]byte, addBytes)...) - s.out = s.out[:len(s.out)-addBytes] + out = append(out, make([]byte, addBytes)...) + out = out[:len(out)-addBytes] } if ml > maxMatchLen { return fmt.Errorf("match len (%d) bigger than max allowed length", ml) } // Add literals - s.out = append(s.out, s.literals[:ll]...) + out = append(out, s.literals[:ll]...) 
s.literals = s.literals[ll:] - out := s.out if mo == 0 && ml > 0 { return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) } - if mo > len(s.out)+len(hist) || mo > s.windowSize { + if mo > len(out)+len(hist) || mo > s.windowSize { if len(s.dict) == 0 { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist)) + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) } // we may be in dictionary. - dictO := len(s.dict) - (mo - (len(s.out) + len(hist))) + dictO := len(s.dict) - (mo - (len(out) + len(hist))) if dictO < 0 || dictO >= len(s.dict) { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist)) + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) } end := dictO + ml if end > len(s.dict) { out = append(out, s.dict[dictO:]...) - mo -= len(s.dict) - dictO ml -= len(s.dict) - dictO } else { out = append(out, s.dict[dictO:end]...) @@ -231,26 +368,25 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { // Copy from history. // TODO: Blocks without history could be made to ignore this completely. - if v := mo - len(s.out); v > 0 { + if v := mo - len(out); v > 0 { // v is the start position in history from end. - start := len(s.hist) - v + start := len(hist) - v if ml > v { // Some goes into current block. // Copy remainder of history - out = append(out, s.hist[start:]...) - mo -= v + out = append(out, hist[start:]...) ml -= v } else { - out = append(out, s.hist[start:start+ml]...) + out = append(out, hist[start:start+ml]...) ml = 0 } } // We must be in current buffer now if ml > 0 { - start := len(s.out) - mo - if ml <= len(s.out)-start { + start := len(out) - mo + if ml <= len(out)-start { // No overlap - out = append(out, s.out[start:start+ml]...) + out = append(out, out[start:start+ml]...) } else { // Overlapping copy // Extend destination slice and copy one byte at the time. @@ -264,7 +400,6 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { } } } - s.out = out if i == 0 { // This is the last sequence, so we shouldn't update state. break @@ -278,7 +413,8 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { mlState = mlTable[mlState.newState()&maxTableMask] ofState = ofTable[ofState.newState()&maxTableMask] } else { - bits := br.getBitsFast(nBits) + bits := br.get32BitsFast(nBits) + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) llState = llTable[(llState.newState()+lowBits)&maxTableMask] @@ -291,19 +427,14 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { } } - // Add final literals - s.out = append(s.out, s.literals...) - return nil -} + // Check if space for literals + if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } -// update states, at least 27 bits must be available. -func (s *sequenceDecs) update(br *bitReader) { - // Max 8 bits - s.litLengths.state.next(br) - // Max 9 bits - s.matchLengths.state.next(br) - // Max 8 bits - s.offsets.state.next(br) + // Add final literals + s.out = append(out, s.literals...) + return br.close() } var bitMask [16]uint16 @@ -314,87 +445,6 @@ func init() { } } -// update states, at least 27 bits must be available. -func (s *sequenceDecs) updateAlt(br *bitReader) { - // Update all 3 states at once. Approx 20% faster. 
- a, b, c := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state - - nBits := a.nbBits() + b.nbBits() + c.nbBits() - if nBits == 0 { - s.litLengths.state.state = s.litLengths.state.dt[a.newState()] - s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()] - s.offsets.state.state = s.offsets.state.dt[c.newState()] - return - } - bits := br.getBitsFast(nBits) - lowBits := uint16(bits >> ((c.nbBits() + b.nbBits()) & 31)) - s.litLengths.state.state = s.litLengths.state.dt[a.newState()+lowBits] - - lowBits = uint16(bits >> (c.nbBits() & 31)) - lowBits &= bitMask[b.nbBits()&15] - s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()+lowBits] - - lowBits = uint16(bits) & bitMask[c.nbBits()&15] - s.offsets.state.state = s.offsets.state.dt[c.newState()+lowBits] -} - -// nextFast will return new states when there are at least 4 unused bytes left on the stream when done. -func (s *sequenceDecs) nextFast(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { - // Final will not read from stream. - ll, llB := llState.final() - ml, mlB := mlState.final() - mo, moB := ofState.final() - - // extra bits are stored in reverse order. - br.fillFast() - mo += br.getBits(moB) - if s.maxBits > 32 { - br.fillFast() - } - ml += br.getBits(mlB) - ll += br.getBits(llB) - - if moB > 1 { - s.prevOffset[2] = s.prevOffset[1] - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = mo - return - } - // mo = s.adjustOffset(mo, ll, moB) - // Inlined for rather big speedup - if ll == 0 { - // There is an exception though, when current sequence's literals_length = 0. - // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, - // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. - mo++ - } - - if mo == 0 { - mo = s.prevOffset[0] - return - } - var temp int - if mo == 3 { - temp = s.prevOffset[0] - 1 - } else { - temp = s.prevOffset[mo] - } - - if temp == 0 { - // 0 is not valid; input is corrupted; force offset to 1 - println("temp was 0") - temp = 1 - } - - if mo != 1 { - s.prevOffset[2] = s.prevOffset[1] - } - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = temp - mo = temp - return -} - func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { // Final will not read from stream. ll, llB := llState.final() @@ -457,36 +507,3 @@ func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int { s.prevOffset[0] = temp return temp } - -// mergeHistory will merge history. 
-func (s *sequenceDecs) mergeHistory(hist *sequenceDecs) (*sequenceDecs, error) { - for i := uint(0); i < 3; i++ { - var sNew, sHist *sequenceDec - switch i { - default: - // same as "case 0": - sNew = &s.litLengths - sHist = &hist.litLengths - case 1: - sNew = &s.offsets - sHist = &hist.offsets - case 2: - sNew = &s.matchLengths - sHist = &hist.matchLengths - } - if sNew.repeat { - if sHist.fse == nil { - return nil, fmt.Errorf("sequence stream %d, repeat requested, but no history", i) - } - continue - } - if sNew.fse == nil { - return nil, fmt.Errorf("sequence stream %d, no fse found", i) - } - if sHist.fse != nil && !sHist.fse.preDefined { - fseDecoderPool.Put(sHist.fse) - } - sHist.fse = sNew.fse - } - return hist, nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go new file mode 100644 index 000000000..191384adf --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -0,0 +1,379 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" + + "github.com/klauspost/compress/internal/cpuinfo" +) + +type decodeSyncAsmContext struct { + llTable []decSymbol + mlTable []decSymbol + ofTable []decSymbol + llState uint64 + mlState uint64 + ofState uint64 + iteration int + litRemain int + out []byte + outPosition int + literals []byte + litPosition int + history []byte + windowSize int + ll int // set on error (not for all errors, please refer to _generate/gen.go) + ml int // set on error (not for all errors, please refer to _generate/gen.go) + mo int // set on error (not for all errors, please refer to _generate/gen.go) +} + +// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions. +// +//go:noescape +func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer. +// +//go:noescape +func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer. +// +//go:noescape +func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// decode sequences from the stream with the provided history but without a dictionary. +func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + if len(s.dict) > 0 { + return false, nil + } + if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize { + return false, nil + } + + // FIXME: Using unsafe memory copies leads to rare, random crashes + // with fuzz testing. It is therefore disabled for now. 
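+	// (The disabled heuristic below would pick the unsafe variants only when
+	// s.out and s.literals carry the compressedBlockOverAlloc headroom;
+	// forcing useSafe to true always routes through the bounds-checked
+	// copies instead.)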
+ const useSafe = true + /* + useSafe := false + if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc { + useSafe = true + } + if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) { + useSafe = true + } + if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { + useSafe = true + } + */ + + br := s.br + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + ctx := decodeSyncAsmContext{ + llTable: s.litLengths.fse.dt[:maxTablesize], + mlTable: s.matchLengths.fse.dt[:maxTablesize], + ofTable: s.offsets.fse.dt[:maxTablesize], + llState: uint64(s.litLengths.state.state), + mlState: uint64(s.matchLengths.state.state), + ofState: uint64(s.offsets.state.state), + iteration: s.nSeqs - 1, + litRemain: len(s.literals), + out: s.out, + outPosition: len(s.out), + literals: s.literals, + windowSize: s.windowSize, + history: hist, + } + + s.seqSize = 0 + startSize := len(s.out) + + var errCode int + if cpuinfo.HasBMI2() { + if useSafe { + errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx) + } else { + errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx) + } + } else { + if useSafe { + errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx) + } else { + errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx) + } + } + switch errCode { + case noError: + break + + case errorMatchLenOfsMismatch: + return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml) + + case errorMatchLenTooBig: + return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml) + + case errorMatchOffTooBig: + return true, fmt.Errorf("match offset (%d) bigger than current history (%d)", + ctx.mo, ctx.outPosition+len(hist)-startSize) + + case errorNotEnoughLiterals: + return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", + ctx.ll, ctx.litRemain+ctx.ll) + + case errorNotEnoughSpace: + size := ctx.outPosition + ctx.ll + ctx.ml + if debugDecoder { + println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize) + } + return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + + default: + return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode) + } + + s.seqSize += ctx.litRemain + if s.seqSize > maxBlockSize { + return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + return true, err + } + + s.literals = s.literals[ctx.litPosition:] + t := ctx.outPosition + s.out = s.out[:t] + + // Add final literals + s.out = append(s.out, s.literals...) 
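+	// (Literals not consumed by any sequence trail the block and are
+	// appended verbatim once the asm loop is done.)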
+	if debugDecoder {
+		t += len(s.literals)
+		if t != len(s.out) {
+			panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t))
+		}
+	}
+
+	return true, nil
+}
+
+// --------------------------------------------------------------------------------
+
+type decodeAsmContext struct {
+	llTable   []decSymbol
+	mlTable   []decSymbol
+	ofTable   []decSymbol
+	llState   uint64
+	mlState   uint64
+	ofState   uint64
+	iteration int
+	seqs      []seqVals
+	litRemain int
+}
+
+const noError = 0
+
+// error reported when mo == 0 && ml > 0
+const errorMatchLenOfsMismatch = 1
+
+// error reported when ml > maxMatchLen
+const errorMatchLenTooBig = 2
+
+// error reported when mo > available history or mo > s.windowSize
+const errorMatchOffTooBig = 3
+
+// error reported when the sum of literal lengths exceeds the literal buffer size
+const errorNotEnoughLiterals = 4
+
+// error reported when capacity of `out` is too small
+const errorNotEnoughSpace = 5
+
+// sequenceDecs_decode_amd64 implements the main loop of sequenceDecs in x86 asm.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//
+//go:noescape
+func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode_56_amd64 implements the main loop of sequenceDecs in x86 asm.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//
+//go:noescape
+func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode_bmi2 implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//
+//go:noescape
+func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode_56_bmi2 implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//
+//go:noescape
+func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// decode sequences from the stream without the provided history.
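+//
+// It only fills in the (ll, mo, ml) triples; the bytes are materialized by a
+// later execute/executeSimple pass, roughly (a sketch, not verbatim caller
+// code):
+//
+//	seqs := make([]seqVals, s.nSeqs)
+//	if err := s.decode(seqs); err != nil {
+//		return err
+//	}
+//	return s.execute(seqs, hist)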
+func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + ctx := decodeAsmContext{ + llTable: s.litLengths.fse.dt[:maxTablesize], + mlTable: s.matchLengths.fse.dt[:maxTablesize], + ofTable: s.offsets.fse.dt[:maxTablesize], + llState: uint64(s.litLengths.state.state), + mlState: uint64(s.matchLengths.state.state), + ofState: uint64(s.offsets.state.state), + seqs: seqs, + iteration: len(seqs) - 1, + litRemain: len(s.literals), + } + + s.seqSize = 0 + lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56 + var errCode int + if cpuinfo.HasBMI2() { + if lte56bits { + errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx) + } else { + errCode = sequenceDecs_decode_bmi2(s, br, &ctx) + } + } else { + if lte56bits { + errCode = sequenceDecs_decode_56_amd64(s, br, &ctx) + } else { + errCode = sequenceDecs_decode_amd64(s, br, &ctx) + } + } + if errCode != 0 { + i := len(seqs) - ctx.iteration - 1 + switch errCode { + case errorMatchLenOfsMismatch: + ml := ctx.seqs[i].ml + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + + case errorMatchLenTooBig: + ml := ctx.seqs[i].ml + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + + case errorNotEnoughLiterals: + ll := ctx.seqs[i].ll + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll) + } + + return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode) + } + + if ctx.litRemain < 0 { + return fmt.Errorf("literal count is too big: total available %d, total requested %d", + len(s.literals), len(s.literals)-ctx.litRemain) + } + + s.seqSize += ctx.litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// -------------------------------------------------------------------------------- + +type executeAsmContext struct { + seqs []seqVals + seqIndex int + out []byte + history []byte + literals []byte + outPosition int + litPosition int + windowSize int +} + +// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm. +// +// Returns false if a match offset is too big. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool + +// Same as above, but with safe memcopies +// +//go:noescape +func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool + +// executeSimple handles cases when dictionary is not used. +func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) { + addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc + s.out = append(s.out, make([]byte, addBytes)...) 
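+		// (Grow-then-truncate: the append raises cap(s.out) in one allocation
+		// and the re-slice on the next line restores the original length.)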
+ s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + ctx := executeAsmContext{ + seqs: seqs, + seqIndex: 0, + out: out, + history: hist, + outPosition: t, + litPosition: 0, + literals: s.literals, + windowSize: s.windowSize, + } + var ok bool + if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { + ok = sequenceDecs_executeSimple_safe_amd64(&ctx) + } else { + ok = sequenceDecs_executeSimple_amd64(&ctx) + } + if !ok { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", + seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist)) + } + s.literals = s.literals[ctx.litPosition:] + t = ctx.outPosition + + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s new file mode 100644 index 000000000..b94993a07 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -0,0 +1,4079 @@ +// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm + +// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_amd64(SB), $8-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. 
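+	// (Fast path: consume whole bytes from the tail pointer eight at a time
+	// while at least eight remain; otherwise top the bit buffer up one byte
+	// at a time.)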
+ CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_end + +sequenceDecs_decode_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_byte_by_byte + +sequenceDecs_decode_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_of_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_of_update_zero: + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_ml_update_zero: + MOVQ AX, 8(R10) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_2_end + +sequenceDecs_decode_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_2_end + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte + +sequenceDecs_decode_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_ll_update_zero: + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRQ $0x10, DI + MOVWQZX DI, DI + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R9 + + // Load ctx.ofTable + 
MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_amd64_after_adjust + +sequenceDecs_decode_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_amd64_adjust_offset_nonzero + +sequenceDecs_decode_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_amd64_after_adjust + +sequenceDecs_decode_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_amd64_adjust_zero + JEQ sequenceDecs_decode_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_amd64_adjust_three + JMP sequenceDecs_decode_amd64_adjust_two + +sequenceDecs_decode_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_amd64_after_adjust: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + +// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_56_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. 
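+	// (This variant is selected when s.maxBits plus the three table logs is
+	// at most 56, so a single refill per sequence covers both the value bits
+	// and the state updates; the second fill of the plain variant is
+	// dropped.)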
+ CMPQ SI, $0x08 + JL sequenceDecs_decode_56_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_56_amd64_fill_end + +sequenceDecs_decode_56_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_56_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decode_56_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte + +sequenceDecs_decode_56_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_of_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_of_update_zero: + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_ml_update_zero: + MOVQ AX, 8(R10) + + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_ll_update_zero: + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRQ $0x10, DI + MOVWQZX DI, DI + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_56_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_56_amd64_after_adjust + +sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_56_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_amd64_adjust_offset_nonzero + 
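+	// (offsetB <= 1 selects a repeat offset; with literal length zero the
+	// repcodes are shifted by one, as in adjustOffset in seqdec.go.)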
+sequenceDecs_decode_56_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_56_amd64_after_adjust + +sequenceDecs_decode_56_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_amd64_adjust_zero + JEQ sequenceDecs_decode_56_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_amd64_adjust_three + JMP sequenceDecs_decode_56_amd64_adjust_two + +sequenceDecs_decode_56_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_56_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_56_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_56_amd64_after_adjust: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_56_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_56_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + +// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
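+	// (This variant leans on BMI2: BEXTRQ/BZHIQ/SHRXQ extract the bit fields
+	// below in flag-free instructions where the plain variant needs
+	// shift-and-mask sequences through CL.)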
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_end + +sequenceDecs_decode_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_byte_by_byte + +sequenceDecs_decode_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_2_end + +sequenceDecs_decode_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_2_end + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte + +sequenceDecs_decode_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_bmi2_skip_update + LEAQ (SI)(DI*1), R14 + ADDQ R8, R14 + MOVBQZX R14, R14 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + + // Update Offset State + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R15, CX + MOVQ $0x00001010, R14 + BEXTRQ R14, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decode_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_bmi2_after_adjust + +sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_bmi2_after_adjust + +sequenceDecs_decode_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_bmi2_adjust_zero + JEQ 
sequenceDecs_decode_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_bmi2_adjust_three + JMP sequenceDecs_decode_bmi2_adjust_two + +sequenceDecs_decode_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_three: + LEAQ -1(R10), R13 + +sequenceDecs_decode_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_bmi2_after_adjust: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + +// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_56_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_56_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_56_bmi2_fill_end + +sequenceDecs_decode_56_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_56_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decode_56_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte + +sequenceDecs_decode_56_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_bmi2_skip_update + LEAQ (SI)(DI*1), R14 + ADDQ R8, R14 + MOVBQZX R14, R14 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + + // Update Offset State + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R15, CX + MOVQ $0x00001010, R14 + BEXTRQ R14, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decode_56_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_56_bmi2_after_adjust + +sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_56_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_56_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_56_bmi2_after_adjust + +sequenceDecs_decode_56_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_bmi2_adjust_zero + JEQ sequenceDecs_decode_56_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_bmi2_adjust_three + JMP sequenceDecs_decode_56_bmi2_adjust_two + +sequenceDecs_decode_56_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_three: + LEAQ -1(R10), R13 + 
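+	// (A computed repeat offset of zero means corrupt input; the check below
+	// forces it to one, matching the "temp was 0" path in the Go fallback.)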
+sequenceDecs_decode_56_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_56_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_56_bmi2_after_adjust: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_56_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_56_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + +// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (SI)(R14*1), X0 + MOVUPS X0, (BX)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, R11 + JB copy_1 + ADDQ R11, SI + ADDQ R11, BX + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JG copy_all_from_history + MOVQ R13, R11 + SUBQ $0x10, R11 + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R11 + JAE copy_4_loop + LEAQ 16(R14)(R11*1), R14 + LEAQ 16(BX)(R11*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), R11 + MOVB 2(R14), R12 + MOVW R11, (BX) + MOVB R12, 2(BX) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), R11 + MOVL -4(R14)(R13*1), R12 + MOVL R11, (BX) + MOVL R12, -4(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), R11 + MOVQ -8(R14)(R13*1), R12 + MOVQ R11, (BX) + MOVQ 
R12, -8(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + +copy_4_end: + ADDQ R13, DI + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + MOVQ R11, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(BX)(R15*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_5_end + +copy_5_small: + CMPQ R11, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ R11, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(R11*1), BP + MOVB R15, (BX) + MOVB BP, -1(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (BX) + MOVB BP, 2(BX) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(R11*1), BP + MOVL R15, (BX) + MOVL BP, -4(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(R11*1), BP + MOVQ R15, (BX) + MOVQ BP, -8(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + +copy_5_end: + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, DI + MOVQ BX, R12 + ADDQ R13, BX + +copy_2: + MOVUPS (R11), X0 + MOVUPS X0, (R12) + ADDQ $0x10, R11 + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, DI + +copy_slow_3: + MOVB (R11), R12 + MOVB R12, (BX) + INCQ R11 + INCQ BX + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_safe_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + MOVQ R11, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (SI), X0 + MOVUPS X0, (BX) + ADDQ $0x10, SI + ADDQ $0x10, BX + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(SI)(R14*1), SI + LEAQ 16(BX)(R14*1), BX + MOVUPS -16(SI), X0 + MOVUPS X0, -16(BX) + JMP copy_1_end + +copy_1_small: + CMPQ R11, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ R11, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (SI), R14 + MOVB -1(SI)(R11*1), R15 + MOVB R14, (BX) + MOVB R15, -1(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + 
+copy_1_move_3: + MOVW (SI), R14 + MOVB 2(SI), R15 + MOVW R14, (BX) + MOVB R15, 2(BX) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_4through7: + MOVL (SI), R14 + MOVL -4(SI)(R11*1), R15 + MOVL R14, (BX) + MOVL R15, -4(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (SI), R14 + MOVQ -8(SI)(R11*1), R15 + MOVQ R14, (BX) + MOVQ R15, -8(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + +copy_1_end: + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JG copy_all_from_history + MOVQ R13, R11 + SUBQ $0x10, R11 + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R11 + JAE copy_4_loop + LEAQ 16(R14)(R11*1), R14 + LEAQ 16(BX)(R11*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), R11 + MOVB 2(R14), R12 + MOVW R11, (BX) + MOVB R12, 2(BX) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), R11 + MOVL -4(R14)(R13*1), R12 + MOVL R11, (BX) + MOVL R12, -4(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), R11 + MOVQ -8(R14)(R13*1), R12 + MOVQ R11, (BX) + MOVQ R12, -8(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + +copy_4_end: + ADDQ R13, DI + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + MOVQ R11, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(BX)(R15*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_5_end + +copy_5_small: + CMPQ R11, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ R11, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(R11*1), BP + MOVB R15, (BX) + MOVB BP, -1(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (BX) + MOVB BP, 2(BX) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(R11*1), BP + MOVL R15, (BX) + MOVL BP, -4(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(R11*1), BP + MOVQ R15, (BX) + MOVQ BP, -8(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + +copy_5_end: + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, DI + MOVQ R13, R12 + SUBQ $0x10, R12 + JB copy_2_small + +copy_2_loop: + MOVUPS (R11), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R11 + ADDQ $0x10, BX + SUBQ $0x10, R12 + JAE copy_2_loop + LEAQ 16(R11)(R12*1), R11 + LEAQ 16(BX)(R12*1), BX + MOVUPS -16(R11), X0 + MOVUPS X0, -16(BX) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB 
(R11), R12 + MOVB -1(R11)(R13*1), R14 + MOVB R12, (BX) + MOVB R14, -1(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_3: + MOVW (R11), R12 + MOVB 2(R11), R14 + MOVW R12, (BX) + MOVB R14, 2(BX) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_4through7: + MOVL (R11), R12 + MOVL -4(R11)(R13*1), R14 + MOVL R12, (BX) + MOVL R14, -4(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (R11), R12 + MOVQ -8(R11)(R13*1), R14 + MOVQ R12, (BX) + MOVQ R14, -8(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, DI + +copy_slow_3: + MOVB (R11), R12 + MOVB R12, (BX) + INCQ R11 + INCQ BX + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + XORQ CX, CX + MOVQ CX, 8(SP) + MOVQ CX, 16(SP) + MOVQ CX, 24(SP) + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_amd64_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_end + +sequenceDecs_decodeSync_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_of_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_of_update_zero: + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_ml_update_zero: + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_2_end + +sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_2_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_ll_update_zero: + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRQ $0x10, DI + MOVWQZX DI, DI + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL 
$0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_amd64_after_adjust + +sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_amd64_after_adjust + +sequenceDecs_decodeSync_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + ADDQ 144(CX)(AX*8), R14 + JNZ sequenceDecs_decodeSync_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_amd64_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (R11)(R14*1), X0 + MOVUPS X0, (R10)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, AX + JB copy_1 + ADDQ AX, R11 + ADDQ AX, R10 + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JG copy_all_from_history + MOVQ R13, AX + SUBQ $0x10, AX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, AX + JAE copy_4_loop + LEAQ 16(R14)(AX*1), R14 + LEAQ 16(R10)(AX*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), AX + MOVB 2(R14), CL + MOVW AX, (R10) + MOVB CL, 2(R10) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), AX + MOVL -4(R14)(R13*1), CX + MOVL AX, (R10) + MOVL CX, -4(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), AX + MOVQ -8(R14)(R13*1), CX + MOVQ AX, (R10) + MOVQ CX, 
-8(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + +copy_4_end: + ADDQ R13, R12 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ AX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R10)(R15*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_5_end + +copy_5_small: + CMPQ AX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ AX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(AX*1), BP + MOVB R15, (R10) + MOVB BP, -1(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R10) + MOVB BP, 2(R10) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(AX*1), BP + MOVL R15, (R10) + MOVL BP, -4(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(AX*1), BP + MOVQ R15, (R10) + MOVQ BP, -8(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + +copy_5_end: + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + MOVQ R10, CX + ADDQ R13, R10 + +copy_2: + MOVUPS (AX), X0 + MOVUPS X0, (CX) + ADDQ $0x10, AX + ADDQ $0x10, CX + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R12 + +copy_slow_3: + MOVB (AX), CL + MOVB CL, (R10) + INCQ AX + INCQ R10 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + XORQ R9, R9 + MOVQ R9, 8(SP) + MOVQ R9, 16(SP) + MOVQ R9, 24(SP) + MOVQ 112(CX), 
R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length. + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_end + +sequenceDecs_decodeSync_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_2_end + +sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_2_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_bmi2_skip_update + LEAQ (SI)(DI*1), R13 + ADDQ R8, R13 + MOVBQZX R13, R13 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + + // Update Offset State + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R14, CX + MOVQ $0x00001010, R13 + BEXTRQ R13, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decodeSync_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 
144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_bmi2_after_adjust + +sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_bmi2_after_adjust + +sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + ADDQ 144(CX)(R12*8), R14 + JNZ sequenceDecs_decodeSync_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_bmi2_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (R10)(R14*1), X0 + MOVUPS X0, (R9)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, CX + JB copy_1 + ADDQ CX, R10 + ADDQ CX, R9 + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JG copy_all_from_history + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, CX + JAE copy_4_loop + LEAQ 16(R14)(CX*1), R14 + LEAQ 16(R9)(CX*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), CX + MOVB 2(R14), R12 + MOVW CX, (R9) + MOVB R12, 2(R9) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), CX + MOVL -4(R14)(R13*1), R12 + MOVL CX, (R9) + MOVL R12, -4(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), CX + MOVQ -8(R14)(R13*1), R12 + MOVQ CX, (R9) + MOVQ R12, -8(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + +copy_4_end: + ADDQ R13, R11 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ CX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R9)(R15*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) 
+ JMP copy_5_end + +copy_5_small: + CMPQ CX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ CX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(CX*1), BP + MOVB R15, (R9) + MOVB BP, -1(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R9) + MOVB BP, 2(R9) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(CX*1), BP + MOVL R15, (R9) + MOVL BP, -4(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(CX*1), BP + MOVQ R15, (R9) + MOVQ BP, -8(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + +copy_5_end: + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + MOVQ R9, R12 + ADDQ R13, R9 + +copy_2: + MOVUPS (CX), X0 + MOVUPS X0, (R12) + ADDQ $0x10, CX + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + XORQ CX, CX + MOVQ CX, 8(SP) + MOVQ CX, 16(SP) + MOVQ CX, 24(SP) + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_safe_amd64_main_loop: + MOVQ (SP), R13 + + 
// Fill bitreader to have enough for the offset and match length. + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_end + +sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_of_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_of_update_zero: + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_ml_update_zero: + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_end + +sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_ll_update_zero: + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_safe_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRQ $0x10, DI + MOVWQZX DI, DI + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R8 + + // Load ctx.mlTable + MOVQ 
ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_safe_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_amd64_after_adjust + +sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_amd64_after_adjust + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + ADDQ 144(CX)(AX*8), R14 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_safe_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_amd64_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + MOVQ AX, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (R11), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R11 + ADDQ $0x10, R10 + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(R11)(R14*1), R11 + LEAQ 16(R10)(R14*1), R10 + MOVUPS -16(R11), X0 + MOVUPS X0, -16(R10) + JMP copy_1_end + +copy_1_small: + CMPQ AX, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ AX, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (R11), R14 + MOVB -1(R11)(AX*1), R15 + MOVB R14, (R10) + MOVB R15, -1(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_3: + MOVW (R11), R14 + MOVB 2(R11), R15 + MOVW R14, (R10) + MOVB R15, 2(R10) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_4through7: + MOVL (R11), R14 + MOVL -4(R11)(AX*1), R15 + MOVL R14, (R10) + MOVL R15, -4(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (R11), R14 + MOVQ -8(R11)(AX*1), R15 + MOVQ R14, (R10) + MOVQ R15, -8(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + 
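+	// Unlike this _safe variant, which writes exactly the requested number
+	// of bytes via the sized moves above, the fast variant's literal copy
+	// rounds the length up to whole 16-byte blocks and may write up to 15
+	// bytes past it, relying on the fast path only being chosen when the
+	// output buffer has that much slack. Rough Go shape of the fast path
+	// (sketch; copy16 is a made-up helper for one 16-byte SSE move):
+	//
+	//	for i := 0; i < n; i += 16 { // overshoots to the next multiple of 16
+	//		copy16(dst[i:], lits[i:])
+	//	}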
+copy_1_end: + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JG copy_all_from_history + MOVQ R13, AX + SUBQ $0x10, AX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, AX + JAE copy_4_loop + LEAQ 16(R14)(AX*1), R14 + LEAQ 16(R10)(AX*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), AX + MOVB 2(R14), CL + MOVW AX, (R10) + MOVB CL, 2(R10) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), AX + MOVL -4(R14)(R13*1), CX + MOVL AX, (R10) + MOVL CX, -4(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), AX + MOVQ -8(R14)(R13*1), CX + MOVQ AX, (R10) + MOVQ CX, -8(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + +copy_4_end: + ADDQ R13, R12 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ AX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R10)(R15*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_5_end + +copy_5_small: + CMPQ AX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ AX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(AX*1), BP + MOVB R15, (R10) + MOVB BP, -1(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R10) + MOVB BP, 2(R10) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(AX*1), BP + MOVL R15, (R10) + MOVL BP, -4(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(AX*1), BP + MOVQ R15, (R10) + MOVQ BP, -8(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + +copy_5_end: + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_2_small + +copy_2_loop: + MOVUPS (AX), X0 + MOVUPS X0, (R10) + ADDQ $0x10, AX + ADDQ $0x10, R10 + SUBQ $0x10, CX + JAE copy_2_loop + LEAQ 16(AX)(CX*1), AX + LEAQ 16(R10)(CX*1), R10 + MOVUPS -16(AX), X0 + MOVUPS X0, -16(R10) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (AX), CL + MOVB -1(AX)(R13*1), R14 + MOVB CL, (R10) + MOVB R14, -1(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_3: + MOVW (AX), CX + MOVB 2(AX), R14 + MOVW CX, (R10) + MOVB R14, 2(R10) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_4through7: + MOVL (AX), CX + MOVL -4(AX)(R13*1), R14 + MOVL CX, (R10) + MOVL R14, -4(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (AX), CX + MOVQ 
-8(AX)(R13*1), R14 + MOVQ CX, (R10) + MOVQ R14, -8(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R12 + +copy_slow_3: + MOVB (AX), CL + MOVB CL, (R10) + INCQ AX + INCQ R10 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_safe_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + XORQ R9, R9 + MOVQ R9, 8(SP) + MOVQ R9, 16(SP) + MOVQ R9, 24(SP) + MOVQ 112(CX), R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_safe_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_end + +sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_end + +sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_safe_bmi2_skip_update + LEAQ (SI)(DI*1), R13 + ADDQ R8, R13 + MOVBQZX R13, R13 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + + // Update Offset State + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R14, CX + MOVQ $0x00001010, R13 + BEXTRQ R13, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decodeSync_safe_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust + +sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ 
sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + ADDQ 144(CX)(R12*8), R14 + JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_safe_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_bmi2_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + MOVQ CX, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (R10), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R10 + ADDQ $0x10, R9 + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(R10)(R14*1), R10 + LEAQ 16(R9)(R14*1), R9 + MOVUPS -16(R10), X0 + MOVUPS X0, -16(R9) + JMP copy_1_end + +copy_1_small: + CMPQ CX, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ CX, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (R10), R14 + MOVB -1(R10)(CX*1), R15 + MOVB R14, (R9) + MOVB R15, -1(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_3: + MOVW (R10), R14 + MOVB 2(R10), R15 + MOVW R14, (R9) + MOVB R15, 2(R9) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_4through7: + MOVL (R10), R14 + MOVL -4(R10)(CX*1), R15 + MOVL R14, (R9) + MOVL R15, -4(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (R10), R14 + MOVQ -8(R10)(CX*1), R15 + MOVQ R14, (R9) + MOVQ R15, -8(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + +copy_1_end: + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JG copy_all_from_history + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, CX + JAE copy_4_loop + LEAQ 16(R14)(CX*1), R14 + LEAQ 16(R9)(CX*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), CX + MOVB 2(R14), R12 + MOVW CX, (R9) + MOVB R12, 2(R9) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), CX + MOVL -4(R14)(R13*1), R12 + MOVL CX, (R9) + 
MOVL R12, -4(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), CX + MOVQ -8(R14)(R13*1), R12 + MOVQ CX, (R9) + MOVQ R12, -8(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + +copy_4_end: + ADDQ R13, R11 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ CX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R9)(R15*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_5_end + +copy_5_small: + CMPQ CX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ CX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(CX*1), BP + MOVB R15, (R9) + MOVB BP, -1(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R9) + MOVB BP, 2(R9) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(CX*1), BP + MOVL R15, (R9) + MOVL BP, -4(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(CX*1), BP + MOVQ R15, (R9) + MOVQ BP, -8(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + +copy_5_end: + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + MOVQ R13, R12 + SUBQ $0x10, R12 + JB copy_2_small + +copy_2_loop: + MOVUPS (CX), X0 + MOVUPS X0, (R9) + ADDQ $0x10, CX + ADDQ $0x10, R9 + SUBQ $0x10, R12 + JAE copy_2_loop + LEAQ 16(CX)(R12*1), CX + LEAQ 16(R9)(R12*1), R9 + MOVUPS -16(CX), X0 + MOVUPS X0, -16(R9) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (CX), R12 + MOVB -1(CX)(R13*1), R14 + MOVB R12, (R9) + MOVB R14, -1(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_3: + MOVW (CX), R12 + MOVB 2(CX), R14 + MOVW R12, (R9) + MOVB R14, 2(R9) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_4through7: + MOVL (CX), R12 + MOVL -4(CX)(R13*1), R14 + MOVL R12, (R9) + MOVL R14, -4(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (CX), R12 + MOVQ -8(CX)(R13*1), R14 + MOVQ R12, (R9) + MOVQ R14, -8(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_safe_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, 
ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go new file mode 100644 index 000000000..ac2a80d29 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go @@ -0,0 +1,237 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "fmt" + "io" +) + +// decode sequences from the stream with the provided history but without dictionary. +func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + return false, nil +} + +// decode sequences from the stream without the provided history. +func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + // Grab full sizes tables, to avoid bounds checks. + llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] + llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + s.seqSize = 0 + litRemain := len(s.literals) + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + for i := range seqs { + var ll, mo, ml int + if br.off > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. 
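+					// Illustrative trace (values made up): with s.prevOffset = {100, 200, 300},
+					// a sequence with ll == 0 and mo == 1 selects prevOffset[2] = 300 after the
+					// increment below, and the history is updated to {300, 100, 200}.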
+ mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("WARNING: temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + if br.overread() { + if debugDecoder { + printf("reading sequence %d, exceeded available data\n", i) + } + return io.ErrUnexpectedEOF + } + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + // Evaluate. + // We might be doing this async, so do it early. + if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + s.seqSize += ll + ml + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + litRemain -= ll + if litRemain < 0 { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll) + } + seqs[i] = seqVals{ + ll: ll, + ml: ml, + mo: mo, + } + if i == len(seqs)-1 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.get32BitsFast(nBits) + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + s.seqSize += litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// executeSimple handles cases when a dictionary is not used. +func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize > cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Malformed input + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if seq.ml > v { + // Some goes into the current block. 
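+				// Illustrative split (values made up): with t = 2 and seq.mo = 5, v = 3
+				// bytes come from the tail of hist; if seq.ml = 7, those 3 bytes are
+				// copied here and the remaining 4 are copied from the current buffer below.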
+ // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + + // We must be in the current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added. + for i := range src { + dst[i] = src[i] + } + } + } + } + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go index 9d9d1d567..9e1baad73 100644 --- a/vendor/github.com/klauspost/compress/zstd/snappy.go +++ b/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -10,8 +10,8 @@ import ( "hash/crc32" "io" - "github.com/golang/snappy" "github.com/klauspost/compress/huff0" + snappy "github.com/klauspost/compress/internal/snapref" ) const ( @@ -203,7 +203,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { written += int64(n) continue case chunkTypeUncompressedData: - if debug { + if debugEncoder { println("Uncompressed, chunklen", chunkLen) } // Section 4.3. Uncompressed data (chunk type 0x01). @@ -246,7 +246,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { continue case chunkTypeStreamIdentifier: - if debug { + if debugEncoder { println("stream id", chunkLen, len(snappyMagicBody)) } // Section 4.1. Stream identifier (chunk type 0xff). diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go index 9325b928a..29c15c8c4 100644 --- a/vendor/github.com/klauspost/compress/zstd/zip.go +++ b/vendor/github.com/klauspost/compress/zstd/zip.go @@ -18,36 +18,58 @@ const ZipMethodWinZip = 93 // See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT const ZipMethodPKWare = 20 -var zipReaderPool sync.Pool +// zipReaderPool is the default reader pool. +var zipReaderPool = sync.Pool{New: func() interface{} { + z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1)) + if err != nil { + panic(err) + } + return z +}} -// newZipReader cannot be used since we would leak goroutines... -func newZipReader(r io.Reader) io.ReadCloser { - dec, ok := zipReaderPool.Get().(*Decoder) - if ok { - dec.Reset(r) - } else { - d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true)) - if err != nil { - panic(err) +// newZipReader creates a pooled zip decompressor. +func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser { + pool := &zipReaderPool + if len(opts) > 0 { + opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...) + // Force concurrency 1 + opts = append(opts, WithDecoderConcurrency(1)) + // Create our own pool + pool = &sync.Pool{} + } + return func(r io.Reader) io.ReadCloser { + dec, ok := pool.Get().(*Decoder) + if ok { + dec.Reset(r) + } else { + d, err := NewReader(r, opts...) 
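+			// Editor's note: the archive/zip Decompressor callback has no
+			// error return, so a failed decoder construction can only panic.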
+ if err != nil { + panic(err) + } + dec = d } - dec = d + return &pooledZipReader{dec: dec, pool: pool} } - return &pooledZipReader{dec: dec} } type pooledZipReader struct { - mu sync.Mutex // guards Close and Read - dec *Decoder + mu sync.Mutex // guards Close and Read + pool *sync.Pool + dec *Decoder } func (r *pooledZipReader) Read(p []byte) (n int, err error) { r.mu.Lock() defer r.mu.Unlock() if r.dec == nil { - return 0, errors.New("Read after Close") + return 0, errors.New("read after close or EOF") } dec, err := r.dec.Read(p) - + if err == io.EOF { + r.dec.Reset(nil) + r.pool.Put(r.dec) + r.dec = nil + } return dec, err } @@ -57,15 +79,16 @@ func (r *pooledZipReader) Close() error { var err error if r.dec != nil { err = r.dec.Reset(nil) - zipReaderPool.Put(r.dec) + r.pool.Put(r.dec) r.dec = nil } return err } type pooledZipWriter struct { - mu sync.Mutex // guards Close and Read - enc *Encoder + mu sync.Mutex // guards Close and Read + enc *Encoder + pool *sync.Pool } func (w *pooledZipWriter) Write(p []byte) (n int, err error) { @@ -83,7 +106,7 @@ func (w *pooledZipWriter) Close() error { var err error if w.enc != nil { err = w.enc.Close() - zipReaderPool.Put(w.enc) + w.pool.Put(w.enc) w.enc = nil } return err @@ -104,18 +127,15 @@ func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) { return nil, err } } - return &pooledZipWriter{enc: enc}, nil + return &pooledZipWriter{enc: enc, pool: &pool}, nil } } // ZipDecompressor returns a decompressor that can be registered with zip libraries. // See ZipCompressor for example. -func ZipDecompressor() func(r io.Reader) io.ReadCloser { - return func(r io.Reader) io.ReadCloser { - d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true)) - if err != nil { - panic(err) - } - return d.IOReadCloser() - } +// Options can be specified. WithDecoderConcurrency(1) is forced, +// and by default a 128MB maximum decompression window is specified. +// The window size can be overridden if required. +func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser { + return newZipReader(opts...) } diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index 1ba308c8b..b1886f7c7 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -15,6 +15,12 @@ import ( // enable debug printing const debug = false +// enable encoding debug printing +const debugEncoder = debug + +// enable decoding debug printing +const debugDecoder = debug + // Enable extra assertions. const debugAsserts = debug || false @@ -30,8 +36,8 @@ const forcePreDef = false // zstdMinMatch is the minimum zstd match length. const zstdMinMatch = 3 -// Reset the buffer offset when reaching this. -const bufferReset = math.MaxInt32 - MaxWindowSize +// fcsUnknown is used for unknown frame content size. +const fcsUnknown = math.MaxUint64 var ( // ErrReservedBlockType is returned when a reserved block type is found. @@ -46,6 +52,10 @@ var ( // Typically returned on invalid input. ErrBlockTooSmall = errors.New("block too small") + // ErrUnexpectedBlockSize is returned when a block has unexpected size. + // Typically returned on invalid input. + ErrUnexpectedBlockSize = errors.New("unexpected block size") + // ErrMagicMismatch is returned when a "magic" number isn't what is expected. // Typically this indicates wrong or corrupted input. 
ErrMagicMismatch = errors.New("invalid input: magic number mismatch") @@ -69,6 +79,10 @@ var ( // This is only returned if SingleSegment is specified on the frame. ErrFrameSizeExceeded = errors.New("frame size exceeded") + // ErrFrameSizeMismatch is returned if the stated frame size does not match the expected size. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeMismatch = errors.New("frame size does not match size on stream") + // ErrCRCMismatch is returned if CRC mismatches. ErrCRCMismatch = errors.New("CRC check failed") @@ -82,48 +96,36 @@ var ( ) func println(a ...interface{}) { - if debug { + if debug || debugDecoder || debugEncoder { log.Println(a...) } } func printf(format string, a ...interface{}) { - if debug { + if debug || debugDecoder || debugEncoder { log.Printf(format, a...) } } -// matchLenFast does matching, but will not match the last up to 7 bytes. -func matchLenFast(a, b []byte) int { - endI := len(a) & (math.MaxInt32 - 7) - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - return i + bits.TrailingZeros64(diff)>>3 - } - } - return endI -} - -// matchLen returns the maximum length. +// matchLen returns the maximum common prefix length of a and b. // a must be the shortest of the two. -// The function also returns whether all bytes matched. -func matchLen(a, b []byte) int { - b = b[:len(a)] - for i := 0; i < len(a)-7; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - return i + (bits.TrailingZeros64(diff) >> 3) +func matchLen(a, b []byte) (n int) { + for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { + diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) + if diff != 0 { + return n + bits.TrailingZeros64(diff)>>3 } + n += 8 } - checked := (len(a) >> 3) << 3 - a = a[checked:] - b = b[checked:] for i := range a { if a[i] != b[i] { - return i + checked + break } + n++ } - return len(a) + checked + return n + } func load3232(b []byte, i int32) uint32 { @@ -134,10 +136,6 @@ func load6432(b []byte, i int32) uint64 { return binary.LittleEndian.Uint64(b[i:]) } -func load64(b []byte, i int) uint64 { - return binary.LittleEndian.Uint64(b[i:]) -} - type byter interface { Bytes() []byte Len() int diff --git a/vendor/github.com/mattn/go-shellwords/shellwords.go b/vendor/github.com/mattn/go-shellwords/shellwords.go index 01afd94d1..1b42a0017 100644 --- a/vendor/github.com/mattn/go-shellwords/shellwords.go +++ b/vendor/github.com/mattn/go-shellwords/shellwords.go @@ -232,7 +232,7 @@ loop: case '\'': if !doubleQuoted && !dollarQuote { if singleQuoted { - got = argSingle + got = argQuoted } singleQuoted = !singleQuoted continue diff --git a/vendor/github.com/mistifyio/go-zfs/.gitignore b/vendor/github.com/mistifyio/go-zfs/.gitignore deleted file mode 100644 index 8000dd9db..000000000 --- a/vendor/github.com/mistifyio/go-zfs/.gitignore +++ /dev/null @@ -1 +0,0 @@ -.vagrant diff --git a/vendor/github.com/mistifyio/go-zfs/.travis.yml b/vendor/github.com/mistifyio/go-zfs/.travis.yml deleted file mode 100644 index acbd39cef..000000000 --- a/vendor/github.com/mistifyio/go-zfs/.travis.yml +++ /dev/null @@ -1,43 +0,0 @@ -language: go -dist: trusty -sudo: required -cache: - directories: - - $HOME/.ccache - - $HOME/zfs - -branches: - only: - - master - -env: - - rel=0.6.5.11 - - rel=0.7.6 - -go: - - "1.10.x" - - master - -before_install: - - export MAKEFLAGS=-j$(($(grep -c '^processor' /proc/cpuinfo) * 2 + 1)) - - export PATH=/usr/lib/ccache:$PATH - - go get 
github.com/alecthomas/gometalinter - - gometalinter --install --update - - sudo apt-get update -y && sudo apt-get install -y libattr1-dev libblkid-dev linux-headers-$(uname -r) tree uuid-dev - - mkdir -p $HOME/zfs - - cd $HOME/zfs - - [[ -d spl-$rel.tar.gz ]] || curl -L https://github.com/zfsonlinux/zfs/releases/download/zfs-$rel/spl-$rel.tar.gz | tar xz - - [[ -d zfs-$rel.tar.gz ]] || curl -L https://github.com/zfsonlinux/zfs/releases/download/zfs-$rel/zfs-$rel.tar.gz | tar xz - - (cd spl-$rel && ./configure --prefix=/usr && make && sudo make install) - - (cd zfs-$rel && ./configure --prefix=/usr && make && sudo make install) - - sudo modprobe zfs - - cd $TRAVIS_BUILD_DIR - -script: - - sudo -E $(which go) test -v ./... - - gometalinter --vendor --vendored-linters ./... || true - - gometalinter --errors --vendor --vendored-linters ./... - -notifications: - email: false - irc: "chat.freenode.net#cerana" diff --git a/vendor/github.com/mistifyio/go-zfs/Vagrantfile b/vendor/github.com/mistifyio/go-zfs/Vagrantfile deleted file mode 100644 index 3bd6e120b..000000000 --- a/vendor/github.com/mistifyio/go-zfs/Vagrantfile +++ /dev/null @@ -1,34 +0,0 @@ - -VAGRANTFILE_API_VERSION = "2" - -Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| - config.vm.box = "ubuntu/trusty64" - config.ssh.forward_agent = true - - config.vm.synced_folder ".", "/home/vagrant/go/src/github.com/mistifyio/go-zfs", create: true - - config.vm.provision "shell", inline: < /etc/profile.d/go.sh -export GOPATH=\\$HOME/go -export PATH=\\$GOPATH/bin:/usr/local/go/bin:\\$PATH -END - -chown -R vagrant /home/vagrant/go - -apt-get update -apt-get install -y software-properties-common curl -apt-add-repository --yes ppa:zfs-native/stable -apt-get update -apt-get install -y ubuntu-zfs - -cd /home/vagrant -curl -z go1.3.3.linux-amd64.tar.gz -L -O https://storage.googleapis.com/golang/go1.3.3.linux-amd64.tar.gz -tar -C /usr/local -zxf /home/vagrant/go1.3.3.linux-amd64.tar.gz - -cat << END > /etc/sudoers.d/go -Defaults env_keep += "GOPATH" -END - -EOF - -end diff --git a/vendor/github.com/mistifyio/go-zfs/utils_notsolaris.go b/vendor/github.com/mistifyio/go-zfs/utils_notsolaris.go deleted file mode 100644 index a46f73060..000000000 --- a/vendor/github.com/mistifyio/go-zfs/utils_notsolaris.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build !solaris - -package zfs - -import ( - "strings" -) - -// List of ZFS properties to retrieve from zfs list command on a non-Solaris platform -var dsPropList = []string{"name", "origin", "used", "available", "mountpoint", "compression", "type", "volsize", "quota", "referenced", "written", "logicalused", "usedbydataset"} - -var dsPropListOptions = strings.Join(dsPropList, ",") - -// List of Zpool properties to retrieve from zpool list command on a non-Solaris platform -var zpoolPropList = []string{"name", "health", "allocated", "size", "free", "readonly", "dedupratio", "fragmentation", "freeing", "leaked"} -var zpoolPropListOptions = strings.Join(zpoolPropList, ",") -var zpoolArgs = []string{"get", "-p", zpoolPropListOptions} diff --git a/vendor/github.com/mistifyio/go-zfs/utils_solaris.go b/vendor/github.com/mistifyio/go-zfs/utils_solaris.go deleted file mode 100644 index 0a7e90f22..000000000 --- a/vendor/github.com/mistifyio/go-zfs/utils_solaris.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build solaris - -package zfs - -import ( - "strings" -) - -// List of ZFS properties to retrieve from zfs list command on a Solaris platform -var dsPropList = []string{"name", "origin", "used", "available", "mountpoint", 
"compression", "type", "volsize", "quota", "referenced"} - -var dsPropListOptions = strings.Join(dsPropList, ",") - -// List of Zpool properties to retrieve from zpool list command on a non-Solaris platform -var zpoolPropList = []string{"name", "health", "allocated", "size", "free", "readonly", "dedupratio"} -var zpoolPropListOptions = strings.Join(zpoolPropList, ",") -var zpoolArgs = []string{"get", "-p", zpoolPropListOptions} diff --git a/vendor/github.com/mistifyio/go-zfs/v3/.envrc b/vendor/github.com/mistifyio/go-zfs/v3/.envrc new file mode 100644 index 000000000..f310aea66 --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/v3/.envrc @@ -0,0 +1,4 @@ +has nix && use nix +dotenv_if_exists +PATH_add bin +path_add GOBIN bin diff --git a/vendor/github.com/mistifyio/go-zfs/v3/.gitignore b/vendor/github.com/mistifyio/go-zfs/v3/.gitignore new file mode 100644 index 000000000..0867490ad --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/v3/.gitignore @@ -0,0 +1,6 @@ +bin +go-zfs.test +.vagrant + +# added by lint-install +out/ diff --git a/vendor/github.com/mistifyio/go-zfs/v3/.golangci.yml b/vendor/github.com/mistifyio/go-zfs/v3/.golangci.yml new file mode 100644 index 000000000..499c3eca1 --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/v3/.golangci.yml @@ -0,0 +1,207 @@ +run: + # The default runtime timeout is 1m, which doesn't work well on Github Actions. + timeout: 4m + +# NOTE: This file is populated by the lint-install tool. Local adjustments may be overwritten. +linters-settings: + cyclop: + # NOTE: This is a very high transitional threshold + max-complexity: 37 + package-average: 34.0 + skip-tests: true + + gocognit: + # NOTE: This is a very high transitional threshold + min-complexity: 98 + + dupl: + threshold: 200 + + goconst: + min-len: 4 + min-occurrences: 5 + ignore-tests: true + + gosec: + excludes: + - G107 # Potential HTTP request made with variable url + - G204 # Subprocess launched with function call as argument or cmd arguments + - G404 # Use of weak random number generator (math/rand instead of crypto/rand + + errorlint: + # these are still common in Go: for instance, exit errors. 
+ asserts: false + + exhaustive: + default-signifies-exhaustive: true + + nestif: + min-complexity: 8 + + nolintlint: + require-explanation: true + allow-unused: false + require-specific: true + + revive: + ignore-generated-header: true + severity: warning + rules: + - name: atomic + - name: blank-imports + - name: bool-literal-in-expr + - name: confusing-naming + - name: constant-logical-expr + - name: context-as-argument + - name: context-keys-type + - name: deep-exit + - name: defer + - name: range-val-in-closure + - name: range-val-address + - name: dot-imports + - name: error-naming + - name: error-return + - name: error-strings + - name: errorf + - name: exported + - name: identical-branches + - name: if-return + - name: import-shadowing + - name: increment-decrement + - name: indent-error-flow + - name: indent-error-flow + - name: package-comments + - name: range + - name: receiver-naming + - name: redefines-builtin-id + - name: superfluous-else + - name: struct-tag + - name: time-naming + - name: unexported-naming + - name: unexported-return + - name: unnecessary-stmt + - name: unreachable-code + - name: unused-parameter + - name: var-declaration + - name: var-naming + - name: unconditional-recursion + - name: waitgroup-by-value + + staticcheck: + go: "1.16" + + unused: + go: "1.16" + +output: + sort-results: true + +linters: + disable-all: true + enable: + - asciicheck + - bodyclose + - cyclop + - deadcode + - dogsled + - dupl + - durationcheck + - errcheck + - errname + - errorlint + - exhaustive + - exportloopref + - forcetypeassert + - gocognit + - goconst + - gocritic + - godot + - gofmt + - gofumpt + - gosec + - goheader + - goimports + - goprintffuncname + - gosimple + - govet + - ifshort + - importas + - ineffassign + - makezero + - misspell + - nakedret + - nestif + - nilerr + - noctx + - nolintlint + - predeclared + # disabling for the initial iteration of the linting tool + # - promlinter + - revive + - rowserrcheck + - sqlclosecheck + - staticcheck + - structcheck + - stylecheck + - thelper + - tparallel + - typecheck + - unconvert + - unparam + - unused + - varcheck + - wastedassign + - whitespace + + # Disabled linters, due to being misaligned with Go practices + # - exhaustivestruct + # - gochecknoglobals + # - gochecknoinits + # - goconst + # - godox + # - goerr113 + # - gomnd + # - lll + # - nlreturn + # - testpackage + # - wsl + # Disabled linters, due to not being relevant to our code base: + # - maligned + # - prealloc "For most programs usage of prealloc will be a premature optimization." 
+ # Disabled linters due to bad error messages or bugs + # - tagliatelle + +issues: + # Excluding configuration per-path, per-linter, per-text and per-source + exclude-rules: + - path: _test\.go + linters: + - dupl + - errcheck + - forcetypeassert + - gocyclo + - gosec + - noctx + + - path: .*cmd.* + linters: + - noctx + + - path: main\.go + linters: + - noctx + + - path: .*cmd.* + text: "deep-exit" + + - path: main\.go + text: "deep-exit" + + # This check is of questionable value + - linters: + - tparallel + text: "call t.Parallel on the top level as well as its subtests" + + # Don't hide lint issues just because there are many of them + max-same-issues: 0 + max-issues-per-linter: 0 diff --git a/vendor/github.com/mistifyio/go-zfs/v3/.yamllint b/vendor/github.com/mistifyio/go-zfs/v3/.yamllint new file mode 100644 index 000000000..9a08ad176 --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/v3/.yamllint @@ -0,0 +1,16 @@ +--- +extends: default + +rules: + braces: + max-spaces-inside: 1 + brackets: + max-spaces-inside: 1 + comments: disable + comments-indentation: disable + document-start: disable + line-length: + level: warning + max: 160 + allow-non-breakable-inline-mappings: true + truthy: disable diff --git a/vendor/github.com/mistifyio/go-zfs/v3/CHANGELOG.md b/vendor/github.com/mistifyio/go-zfs/v3/CHANGELOG.md new file mode 100644 index 000000000..349245d03 --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/v3/CHANGELOG.md @@ -0,0 +1,250 @@ +# Change Log + +All notable changes to this project will be documented in this file. +This project adheres to [Semantic Versioning](http://semver.org/). +This change log follows the advice of [Keep a CHANGELOG](https://github.com/olivierlacan/keep-a-changelog). + +## [Unreleased] + +## [3.0.0] - 2022-03-30 + +### Added + +- Rename, Mount and Unmount methods +- Parse more fields into Zpool type: + - dedupratio + - fragmentation + - freeing + - leaked + - readonly +- Parse more fields into Dataset type: + - referenced +- Incremental Send +- Parse numbers in exact format +- Support for Solaris (non-blockint, best-effort status) +- Debug logging for command invocation +- Use GitHub Actions for CI +- Nix shell for dev env reproducibility +- Direnv file for ease of dev +- Formatting/lint checks (enforced by CI) +- Go Module +- FreeBSD based vagrant machine + +### Changed + +- Temporarily adjust TestDiff expected strings depending on ZFS version +- Use one `zfs list`/`zpool list` call instead of many `zfs get`/`zpool get` +- ZFS docs links now point to OpenZFS pages +- Ubuntu vagrant box changed to generic/ubuntu2004 + +### Fixed + +- `GetProperty` returning `VALUE` instead of the actual value + +### Shortlog + + Amit Krishnan (1): + Issue #39 and Issue #40 - Enable Solaris support for go-zfs Switch from zfs/zpool get to zfs/zpool list for better performance Signed-off-by: Amit Krishnan + + Anand Patil (3): + Added Rename + Small fix to rename. 
+ Added mount and umount methods + + Brian Akins (1): + Add 'referenced' to zfs properties + + Brian Bickerton (3): + Add debug logging before and after running external zfs command + Don't export the default no-op logger + Update uuid package repo url + + Dmitry Teselkin (1): + Issue #52 - fix parseLine for fragmentation field + + Edward Betts (1): + correct spelling mistake + + Justin Cormack (1): + Switch to google/uuid which is the maintained version of pborman/uuid + + Manuel Mendez (40): + rename Umount -> Unmount to follow zfs command name + add missing Unmount/Mount docs + always allocate largest Mount slice + add travis config + travis: update to go 1.7 + travis: get go deps first + test: add nok helper to verify an error occurred + test: add test for Dataset.GetProperty + ci: swap #cerana on freenode for slack + ci: install new deps for 0.7 relases + ci: bump zol versions + ci: bump go versions + ci: use better gometalinter invocations + ci: add ccache + ci: set env earlier in before_install + fix test nok error printing + test: restructure TestDiff to deal with different order of changes + test: better unicode path handling in TestDiff + travis: bump zfs and go versions + cache zfs artifacts + Add nix-shell and direnv goodness + prettierify all the files + Add go based tools + Add Makefile and rules.mk files + gofumptize the code base + Use tinkerbell/lint-install to setup linters + make golangci-lint happy + Update CONTRIBUTING.md with make based approach + Add GitHub Actions + Drop Travis CI + One sentence per line + Update documentation links to openzfs-docs pages + Format Vagrantfile using rufo + Add go-zfs.test to .gitignore + test: Avoid reptitive/duplicate error logging and quitting + test: Use t.Logf instead of fmt.Printf + test: Better cleanup and error handling in zpoolTest + test: Do not mark TestDatasets as a t.Helper. 
+ test: Change zpoolTest to a pure helper that returns a clean up function + test: Move helpers to a different file + vagrant: Add set -euxo pipefail to provision script + vagrant: Update to generic/ubuntu2004 + vagrant: Minor fixes to Vagrantfile + vagrant: Update to go 1.17.8 + vagrant: Run go tests as part of provision script + vagrant: Indent heredoc script + vagrant: Add freebsd machine + + Matt Layher (1): + Parse more fields into Zpool type + + Michael Crosby (1): + Add incremental send + + Rikard Gynnerstedt (1): + remove command name from joined args + + Sebastiaan van Stijn (1): + Add go.mod and rename to github.com/mistifyio/go-zfs/v3 (v3.0.0) + + mikudeko (1): + Fix GetProperty always returning 'VALUE' + +## [2.1.1] - 2015-05-29 + +### Fixed + +- Ignoring first pool listed +- Incorrect `zfs get` argument ordering + +### Shortlog + + Alexey Guskov (1): + zfs command uses different order of arguments on freebsd + + Brian Akins (4): + test that ListZpools returns expected zpool + test error first + test error first + fix test to check correct return value + + James Cunningham (1): + Fix Truncating First Zpool + + Pat Norton (2): + Added Use of Go Tools + Update CONTRIBUTING.md + +## [2.1.0] - 2014-12-08 + +### Added + +- Parse hardlink modification count returned from `zfs diff` + +### Fixed + +- Continuing instead of erroring when rolling back a non-snapshot + +### Shortlog + + Brian Akins (2): + need to return the error here + use named struct fields + + Jörg Thalheim (1): + zfs diff handle hardlinks modification now + +## [2.0.0] - 2014-12-02 + +### Added + +- Flags for Destroy: + - DESTROY_DEFAULT + - DESTROY_DEFER_DELETION (`zfs destroy ... -d`) + - DESTROY_FORCE (`zfs destroy ... -f`) + - DESTROY_RECURSIVE_CLONES (`zfs destroy ... -R`) + - DESTROY_RECURSIVE (`zfs destroy ... 
-r`) + - etc +- Diff method (`zfs diff`) +- LogicalUsed and Origin properties to Dataset +- Type constants for Dataset +- State constants for Zpool +- Logger interface +- Improve documentation + +### Shortlog + + Brian Akins (8): + remove reflection + style change for switches + need to check for error + keep in scope + go 1.3.3 + golint cleanup + Just test if logical used is greater than 0, as this appears to be implementation specific + add docs to satisfy golint + + Jörg Thalheim (8): + Add deferred flag to zfs.Destroy() + add Logicalused property + Add Origin property + gofmt + Add zfs.Diff + Add Logger + add recursive destroy with clones + use CamelCase-style constants + + Matt Layher (4): + Improve documentation, document common ZFS operations, provide more references + Add zpool state constants, for easier health checking + Add dataset type constants, for easier type checking + Fix string split in command.Run(), use strings.Fields() instead of strings.Split() + +## [1.0.0] - 2014-11-12 + +### Shortlog + + Brian Akins (7): + add godoc badge + Add example + add information about zpool to struct and parser + Add Quota + add Children call + add Children call + fix snapshot tests + + Brian Bickerton (3): + MIST-150 Change Snapshot second paramater from properties map[string][string] to recursive bool + MIST-150 Add Rollback method and related tests + MIST-160 Add SendSnapshot streaming method and tests + + Matt Layher (1): + Add Error struct type and tests, enabling easier error return checking + +[3.0.0]: https://github.com/mistifyio/go-zfs/compare/v2.1.1...v3.0.0 +[2.1.1]: https://github.com/mistifyio/go-zfs/compare/v2.1.0...v2.1.1 +[2.1.0]: https://github.com/mistifyio/go-zfs/compare/v2.0.0...v2.1.0 +[2.0.0]: https://github.com/mistifyio/go-zfs/compare/v1.0.0...v2.0.0 +[1.0.0]: https://github.com/mistifyio/go-zfs/compare/v0.0.0...v1.0.0 diff --git a/vendor/github.com/mistifyio/go-zfs/CONTRIBUTING.md b/vendor/github.com/mistifyio/go-zfs/v3/CONTRIBUTING.md similarity index 54% rename from vendor/github.com/mistifyio/go-zfs/CONTRIBUTING.md rename to vendor/github.com/mistifyio/go-zfs/v3/CONTRIBUTING.md index f1880c19e..9f625d564 100644 --- a/vendor/github.com/mistifyio/go-zfs/CONTRIBUTING.md +++ b/vendor/github.com/mistifyio/go-zfs/v3/CONTRIBUTING.md @@ -1,20 +1,23 @@ -## How to Contribute ## +## How to Contribute -We always welcome contributions to help make `go-zfs` better. Please take a moment to read this document if you would like to contribute. +We always welcome contributions to help make `go-zfs` better. +Please take a moment to read this document if you would like to contribute. -### Reporting issues ### +### Reporting issues We use [Github issues](https://github.com/mistifyio/go-zfs/issues) to track bug reports, feature requests, and submitting pull requests. If you find a bug: -* Use the GitHub issue search to check whether the bug has already been reported. -* If the issue has been fixed, try to reproduce the issue using the latest `master` branch of the repository. -* If the issue still reproduces or has not yet been reported, try to isolate the problem before opening an issue, if possible. Also provide the steps taken to reproduce the bug. +- Use the GitHub issue search to check whether the bug has already been reported. +- If the issue has been fixed, try to reproduce the issue using the latest `master` branch of the repository. +- If the issue still reproduces or has not yet been reported, try to isolate the problem before opening an issue, if possible. 
Also provide the steps taken to reproduce the bug.

-### Pull requests ###
+### Pull requests

-We welcome bug fixes, improvements, and new features. Before embarking on making significant changes, please open an issue and ask first so that you do not risk duplicating efforts or spending time working on something that may be out of scope. For minor items, just open a pull request.
+We welcome bug fixes, improvements, and new features.
+Before embarking on making significant changes, please open an issue and ask first so that you do not risk duplicating efforts or spending time working on something that may be out of scope.
+For minor items, just open a pull request.

 [Fork the project](https://help.github.com/articles/fork-a-repo), clone your fork, and add the upstream to your remote:

@@ -28,11 +31,13 @@ If you need to pull new changes committed upstream:

     $ git fetch upstream
     $ git merge upstream/master

-Don't work directly on master as this makes it harder to merge later. Create a feature branch for your fix or new feature:
+Don't work directly on master as this makes it harder to merge later.
+Create a feature branch for your fix or new feature:

     $ git checkout -b <branch-name>

-Please try to commit your changes in logical chunks. Ideally, you should include the issue number in the commit message.
+Please try to commit your changes in logical chunks.
+Ideally, you should include the issue number in the commit message.

     $ git commit -m "Issue #<number> - <message>"

@@ -40,21 +45,20 @@ Push your feature branch to your fork.

     $ git push origin <branch-name>

-[Open a Pull Request](https://help.github.com/articles/using-pull-requests) against the upstream master branch. Please give your pull request a clear title and description and note which issue(s) your pull request fixes.
+[Open a Pull Request](https://help.github.com/articles/using-pull-requests) against the upstream master branch.
+Please give your pull request a clear title and description and note which issue(s) your pull request fixes.

-* All Go code should be formatted using [gofmt](http://golang.org/cmd/gofmt/).
-* Every exported function should have [documentation](http://blog.golang.org/godoc-documenting-go-code) and corresponding [tests](http://golang.org/doc/code.html#Testing).
+- All linters should be happy (can be run with `make verify`).
+- Every exported function should have [documentation](http://blog.golang.org/godoc-documenting-go-code) and corresponding [tests](http://golang.org/doc/code.html#Testing).

 **Important:** By submitting a patch, you agree to allow the project owners to license your work under the [Apache 2.0 License](./LICENSE).

-### Go Tools ###
-For consistency and to catch minor issues for all of go code, please run the following:
-* goimports
-* go vet
-* golint
-* errcheck
+### Go Tools
+
+For consistency and to catch minor issues for all of go code, please run `make verify`.

 Many editors can execute the above on save.
----- +--- + Guidelines based on http://azkaban.github.io/contributing.html diff --git a/vendor/github.com/mistifyio/go-zfs/LICENSE b/vendor/github.com/mistifyio/go-zfs/v3/LICENSE similarity index 100% rename from vendor/github.com/mistifyio/go-zfs/LICENSE rename to vendor/github.com/mistifyio/go-zfs/v3/LICENSE diff --git a/vendor/github.com/mistifyio/go-zfs/v3/Makefile b/vendor/github.com/mistifyio/go-zfs/v3/Makefile new file mode 100644 index 000000000..1c5f55e8c --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/v3/Makefile @@ -0,0 +1,19 @@ +help: ## Print this help + @grep --no-filename -E '^[a-zA-Z0-9_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sed 's/:.*## /·/' | sort | column -t -W 2 -s '·' -c $(shell tput cols) + +all: test ## Run tests + +-include rules.mk +-include lint.mk + +test: ## Run tests + go test ./... + +verify: gofumpt prettier lint ## Verify code style, is lint free, freshness ... + git diff | (! grep .) + +fix: gofumpt-fix prettier-fix ## Fix code formatting errors + +tools: ${toolsBins} ## Build Go based build tools + +.PHONY: all help test tools verify diff --git a/vendor/github.com/mistifyio/go-zfs/README.md b/vendor/github.com/mistifyio/go-zfs/v3/README.md similarity index 87% rename from vendor/github.com/mistifyio/go-zfs/README.md rename to vendor/github.com/mistifyio/go-zfs/v3/README.md index fef80d727..c91183300 100644 --- a/vendor/github.com/mistifyio/go-zfs/README.md +++ b/vendor/github.com/mistifyio/go-zfs/v3/README.md @@ -1,12 +1,12 @@ -# Go Wrapper for ZFS # +# Go Wrapper for ZFS Simple wrappers for ZFS command line tools. [![GoDoc](https://godoc.org/github.com/mistifyio/go-zfs?status.svg)](https://godoc.org/github.com/mistifyio/go-zfs) -## Requirements ## +## Requirements -You need a working ZFS setup. To use on Ubuntu 14.04, setup ZFS: +You need a working ZFS setup. To use on Ubuntu 14.04, setup ZFS: sudo apt-get install python-software-properties sudo apt-add-repository ppa:zfs-native/stable @@ -17,13 +17,13 @@ Developed using Go 1.3, but currently there isn't anything 1.3 specific. Don't u Generally you need root privileges to use anything zfs related. -## Status ## +## Status This has been only been tested on Ubuntu 14.04 In the future, we hope to work directly with libzfs. -# Hacking # +# Hacking The tests have decent examples for most functions. 
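As a quick orientation, the sketch below shows the call pattern those tests exercise. It is an editor's illustration rather than part of the upstream README; the dataset name `tank/data` is an assumption, and root privileges are required as noted above.

```go
package main

import (
	"log"

	zfs "github.com/mistifyio/go-zfs/v3"
)

func main() {
	// Look up an existing dataset by name (assumed to exist).
	ds, err := zfs.GetDataset("tank/data")
	if err != nil {
		log.Fatal(err)
	}

	// Take a non-recursive snapshot named tank/data@backup.
	snap, err := ds.Snapshot("backup", false)
	if err != nil {
		log.Fatal(err)
	}

	// Destroy the snapshot with default flags once done.
	if err := snap.Destroy(zfs.DestroyDefault); err != nil {
		log.Fatal(err)
	}
}
```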
@@ -48,7 +48,6 @@ err := f.Destroy() ``` -# Contributing # +# Contributing See the [contributing guidelines](./CONTRIBUTING.md) - diff --git a/vendor/github.com/mistifyio/go-zfs/v3/Vagrantfile b/vendor/github.com/mistifyio/go-zfs/v3/Vagrantfile new file mode 100644 index 000000000..7d8d2decd --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/v3/Vagrantfile @@ -0,0 +1,33 @@ +GOVERSION = "1.17.8" + +Vagrant.configure("2") do |config| + config.vm.define "ubuntu" do |ubuntu| + ubuntu.vm.box = "generic/ubuntu2004" + end + config.vm.define "freebsd" do |freebsd| + freebsd.vm.box = "generic/freebsd13" + end + config.ssh.forward_agent = true + config.vm.synced_folder ".", "/home/vagrant/go/src/github.com/mistifyio/go-zfs", create: true + config.vm.provision "shell", inline: <<-EOF + set -euxo pipefail + + os=$(uname -s|tr '[A-Z]' '[a-z]') + case $os in + linux) apt-get update -y && apt-get install -y --no-install-recommends gcc libc-dev zfsutils-linux ;; + esac + + cd /tmp + curl -fLO --retry-max-time 30 --retry 10 https://go.dev/dl/go#{GOVERSION}.$os-amd64.tar.gz + tar -C /usr/local -zxf go#{GOVERSION}.$os-amd64.tar.gz + ln -nsf /usr/local/go/bin/go /usr/local/bin/go + rm -rf go*.tar.gz + + chown -R vagrant:vagrant /home/vagrant/go + cd /home/vagrant/go/src/github.com/mistifyio/go-zfs + go test -c + sudo ./go-zfs.test -test.v + CGO_ENABLED=0 go test -c + sudo ./go-zfs.test -test.v + EOF +end diff --git a/vendor/github.com/mistifyio/go-zfs/error.go b/vendor/github.com/mistifyio/go-zfs/v3/error.go similarity index 100% rename from vendor/github.com/mistifyio/go-zfs/error.go rename to vendor/github.com/mistifyio/go-zfs/v3/error.go diff --git a/vendor/github.com/mistifyio/go-zfs/v3/go.mod b/vendor/github.com/mistifyio/go-zfs/v3/go.mod new file mode 100644 index 000000000..da2019c5a --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/v3/go.mod @@ -0,0 +1,5 @@ +module github.com/mistifyio/go-zfs/v3 + +go 1.14 + +require github.com/google/uuid v1.2.0 diff --git a/vendor/github.com/mistifyio/go-zfs/v3/go.sum b/vendor/github.com/mistifyio/go-zfs/v3/go.sum new file mode 100644 index 000000000..a4d2b5ecb --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/v3/go.sum @@ -0,0 +1,2 @@ +github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= diff --git a/vendor/github.com/mistifyio/go-zfs/v3/lint.mk b/vendor/github.com/mistifyio/go-zfs/v3/lint.mk new file mode 100644 index 000000000..a1e0a4fd3 --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/v3/lint.mk @@ -0,0 +1,75 @@ +# BEGIN: lint-install -makefile lint.mk . 
+# http://github.com/tinkerbell/lint-install + +.PHONY: lint +lint: _lint + +LINT_ARCH := $(shell uname -m) +LINT_OS := $(shell uname) +LINT_OS_LOWER := $(shell echo $(LINT_OS) | tr '[:upper:]' '[:lower:]') +LINT_ROOT := $(shell dirname $(realpath $(firstword $(MAKEFILE_LIST)))) + +# shellcheck and hadolint lack arm64 native binaries: rely on x86-64 emulation +ifeq ($(LINT_OS),Darwin) + ifeq ($(LINT_ARCH),arm64) + LINT_ARCH=x86_64 + endif +endif + +LINTERS := +FIXERS := + +SHELLCHECK_VERSION ?= v0.8.0 +SHELLCHECK_BIN := out/linters/shellcheck-$(SHELLCHECK_VERSION)-$(LINT_ARCH) +$(SHELLCHECK_BIN): + mkdir -p out/linters + rm -rf out/linters/shellcheck-* + curl -sSfL https://github.com/koalaman/shellcheck/releases/download/$(SHELLCHECK_VERSION)/shellcheck-$(SHELLCHECK_VERSION).$(LINT_OS_LOWER).$(LINT_ARCH).tar.xz | tar -C out/linters -xJf - + mv out/linters/shellcheck-$(SHELLCHECK_VERSION)/shellcheck $@ + rm -rf out/linters/shellcheck-$(SHELLCHECK_VERSION)/shellcheck + +LINTERS += shellcheck-lint +shellcheck-lint: $(SHELLCHECK_BIN) + $(SHELLCHECK_BIN) $(shell find . -name "*.sh") + +FIXERS += shellcheck-fix +shellcheck-fix: $(SHELLCHECK_BIN) + $(SHELLCHECK_BIN) $(shell find . -name "*.sh") -f diff | { read -t 1 line || exit 0; { echo "$$line" && cat; } | git apply -p2; } + +GOLANGCI_LINT_CONFIG := $(LINT_ROOT)/.golangci.yml +GOLANGCI_LINT_VERSION ?= v1.43.0 +GOLANGCI_LINT_BIN := out/linters/golangci-lint-$(GOLANGCI_LINT_VERSION)-$(LINT_ARCH) +$(GOLANGCI_LINT_BIN): + mkdir -p out/linters + rm -rf out/linters/golangci-lint-* + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b out/linters $(GOLANGCI_LINT_VERSION) + mv out/linters/golangci-lint $@ + +LINTERS += golangci-lint-lint +golangci-lint-lint: $(GOLANGCI_LINT_BIN) + find . -name go.mod -execdir "$(GOLANGCI_LINT_BIN)" run -c "$(GOLINT_CONFIG)" \; + +FIXERS += golangci-lint-fix +golangci-lint-fix: $(GOLANGCI_LINT_BIN) + find . -name go.mod -execdir "$(GOLANGCI_LINT_BIN)" run -c "$(GOLINT_CONFIG)" --fix \; + +YAMLLINT_VERSION ?= 1.26.3 +YAMLLINT_ROOT := out/linters/yamllint-$(YAMLLINT_VERSION) +YAMLLINT_BIN := $(YAMLLINT_ROOT)/dist/bin/yamllint +$(YAMLLINT_BIN): + mkdir -p out/linters + rm -rf out/linters/yamllint-* + curl -sSfL https://github.com/adrienverge/yamllint/archive/refs/tags/v$(YAMLLINT_VERSION).tar.gz | tar -C out/linters -zxf - + cd $(YAMLLINT_ROOT) && pip3 install --target dist . + +LINTERS += yamllint-lint +yamllint-lint: $(YAMLLINT_BIN) + PYTHONPATH=$(YAMLLINT_ROOT)/dist $(YAMLLINT_ROOT)/dist/bin/yamllint . + +.PHONY: _lint $(LINTERS) +_lint: $(LINTERS) + +.PHONY: fix $(FIXERS) +fix: $(FIXERS) + +# END: lint-install -makefile lint.mk . diff --git a/vendor/github.com/mistifyio/go-zfs/v3/rules.mk b/vendor/github.com/mistifyio/go-zfs/v3/rules.mk new file mode 100644 index 000000000..4746c978a --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/v3/rules.mk @@ -0,0 +1,49 @@ +# Only use the recipes defined in these makefiles +MAKEFLAGS += --no-builtin-rules +.SUFFIXES: +# Delete target files if there's an error +# This avoids a failure to then skip building on next run if the output is created by shell redirection for example +# Not really necessary for now, but just good to have already if it becomes necessary later. 
+.DELETE_ON_ERROR: +# Treat the whole recipe as a one shell script/invocation instead of one-per-line +.ONESHELL: +# Use bash instead of plain sh +SHELL := bash +.SHELLFLAGS := -o pipefail -euc + +version := $(shell git rev-parse --short HEAD) +tag := $(shell git tag --points-at HEAD) +ifneq (,$(tag)) +version := $(tag)-$(version) +endif +LDFLAGS := -ldflags "-X main.version=$(version)" +export CGO_ENABLED := 0 + +ifeq ($(origin GOBIN), undefined) +GOBIN := ${PWD}/bin +export GOBIN +PATH := ${GOBIN}:${PATH} +export PATH +endif + +toolsBins := $(addprefix bin/,$(notdir $(shell grep '^\s*_' tooling/tools.go | awk -F'"' '{print $$2}'))) + +# installs cli tools defined in tools.go +$(toolsBins): tooling/go.mod tooling/go.sum tooling/tools.go +$(toolsBins): CMD=$(shell awk -F'"' '/$(@F)"/ {print $$2}' tooling/tools.go) +$(toolsBins): + cd tooling && go install $(CMD) + +.PHONY: gofumpt +gofumpt: bin/gofumpt + gofumpt -s -d . + +gofumpt-fix: bin/gofumpt + gofumpt -s -w . + +.PHONY: prettier prettier-fix +prettier: + prettier --list-different --ignore-path .gitignore . + +prettier-fix: + prettier --write --ignore-path .gitignore . diff --git a/vendor/github.com/mistifyio/go-zfs/v3/shell.nix b/vendor/github.com/mistifyio/go-zfs/v3/shell.nix new file mode 100644 index 000000000..e0ea24c16 --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/v3/shell.nix @@ -0,0 +1,26 @@ +let _pkgs = import { }; +in { pkgs ? import (_pkgs.fetchFromGitHub { + owner = "NixOS"; + repo = "nixpkgs"; + #branch@date: 21.11@2022-02-13 + rev = "560ad8a2f89586ab1a14290f128ad6a393046065"; + sha256 = "0s0dv1clfpjyzy4p6ywxvzmwx9ddbr2yl77jf1wqdbr0x1206hb8"; +}) { } }: + +with pkgs; + +mkShell { + buildInputs = [ + git + gnumake + gnused + go + nixfmt + nodePackages.prettier + python3Packages.pip + python3Packages.setuptools + rufo + shfmt + vagrant + ]; +} diff --git a/vendor/github.com/mistifyio/go-zfs/utils.go b/vendor/github.com/mistifyio/go-zfs/v3/utils.go similarity index 74% rename from vendor/github.com/mistifyio/go-zfs/utils.go rename to vendor/github.com/mistifyio/go-zfs/v3/utils.go index c18c2c3da..0c2cce7d9 100644 --- a/vendor/github.com/mistifyio/go-zfs/utils.go +++ b/vendor/github.com/mistifyio/go-zfs/v3/utils.go @@ -21,7 +21,6 @@ type command struct { } func (c *command) Run(arg ...string) ([][]string, error) { - cmd := exec.Command(c.Command, arg...) 
var stdout, stderr bytes.Buffer @@ -34,7 +33,6 @@ func (c *command) Run(arg ...string) ([][]string, error) { if c.Stdin != nil { cmd.Stdin = c.Stdin - } cmd.Stderr = &stderr @@ -42,16 +40,14 @@ func (c *command) Run(arg ...string) ([][]string, error) { joinedArgs := strings.Join(cmd.Args, " ") logger.Log([]string{"ID:" + id, "START", joinedArgs}) - err := cmd.Run() - logger.Log([]string{"ID:" + id, "FINISH"}) - - if err != nil { + if err := cmd.Run(); err != nil { return nil, &Error{ Err: err, Debug: strings.Join([]string{cmd.Path, joinedArgs[1:]}, " "), Stderr: stderr.String(), } } + logger.Log([]string{"ID:" + id, "FINISH"}) // assume if you passed in something for stdout, that you know what to do with it if c.Stdout != nil { @@ -60,7 +56,7 @@ func (c *command) Run(arg ...string) ([][]string, error) { lines := strings.Split(stdout.String(), "\n") - //last line is always blank + // last line is always blank lines = lines[0 : len(lines)-1] output := make([][]string, len(lines)) @@ -92,33 +88,33 @@ func setUint(field *uint64, value string) error { return nil } -func (ds *Dataset) parseLine(line []string) error { +func (d *Dataset) parseLine(line []string) error { var err error if len(line) != len(dsPropList) { - return errors.New("Output does not match what is expected on this platform") + return errors.New("output does not match what is expected on this platform") } - setString(&ds.Name, line[0]) - setString(&ds.Origin, line[1]) + setString(&d.Name, line[0]) + setString(&d.Origin, line[1]) - if err = setUint(&ds.Used, line[2]); err != nil { + if err = setUint(&d.Used, line[2]); err != nil { return err } - if err = setUint(&ds.Avail, line[3]); err != nil { + if err = setUint(&d.Avail, line[3]); err != nil { return err } - setString(&ds.Mountpoint, line[4]) - setString(&ds.Compression, line[5]) - setString(&ds.Type, line[6]) + setString(&d.Mountpoint, line[4]) + setString(&d.Compression, line[5]) + setString(&d.Type, line[6]) - if err = setUint(&ds.Volsize, line[7]); err != nil { + if err = setUint(&d.Volsize, line[7]); err != nil { return err } - if err = setUint(&ds.Quota, line[8]); err != nil { + if err = setUint(&d.Quota, line[8]); err != nil { return err } - if err = setUint(&ds.Referenced, line[9]); err != nil { + if err = setUint(&d.Referenced, line[9]); err != nil { return err } @@ -126,17 +122,13 @@ func (ds *Dataset) parseLine(line []string) error { return nil } - if err = setUint(&ds.Written, line[10]); err != nil { + if err = setUint(&d.Written, line[10]); err != nil { return err } - if err = setUint(&ds.Logicalused, line[11]); err != nil { + if err = setUint(&d.Logicalused, line[11]); err != nil { return err } - if err = setUint(&ds.Usedbydataset, line[12]); err != nil { - return err - } - - return nil + return setUint(&d.Usedbydataset, line[12]) } /* @@ -156,12 +148,12 @@ func unescapeFilepath(path string) (string, error) { for i := 0; i < llen; { if path[i] == '\\' { if llen < i+4 { - return "", fmt.Errorf("Invalid octal code: too short") + return "", fmt.Errorf("invalid octal code: too short") } octalCode := path[(i + 1):(i + 4)] val, err := strconv.ParseUint(octalCode, 8, 8) if err != nil { - return "", fmt.Errorf("Invalid octal code: %v", err) + return "", fmt.Errorf("invalid octal code: %w", err) } buf = append(buf, byte(val)) i += 4 @@ -179,6 +171,7 @@ var changeTypeMap = map[string]ChangeType{ "M": Modified, "R": Renamed, } + var inodeTypeMap = map[string]InodeType{ "B": BlockDevice, "C": CharacterDevice, @@ -191,51 +184,51 @@ var inodeTypeMap = map[string]InodeType{ 
"F": File, } -// matches (+1) or (-1) -var referenceCountRegex = regexp.MustCompile("\\(([+-]\\d+?)\\)") +// matches (+1) or (-1). +var referenceCountRegex = regexp.MustCompile(`\(([+-]\d+?)\)`) func parseReferenceCount(field string) (int, error) { matches := referenceCountRegex.FindStringSubmatch(field) if matches == nil { - return 0, fmt.Errorf("Regexp does not match") + return 0, fmt.Errorf("regexp does not match") } return strconv.Atoi(matches[1]) } func parseInodeChange(line []string) (*InodeChange, error) { - llen := len(line) + llen := len(line) // nolint:ifshort // llen *is* actually used if llen < 1 { - return nil, fmt.Errorf("Empty line passed") + return nil, fmt.Errorf("empty line passed") } changeType := changeTypeMap[line[0]] if changeType == 0 { - return nil, fmt.Errorf("Unknown change type '%s'", line[0]) + return nil, fmt.Errorf("unknown change type '%s'", line[0]) } switch changeType { case Renamed: if llen != 4 { - return nil, fmt.Errorf("Mismatching number of fields: expect 4, got: %d", llen) + return nil, fmt.Errorf("mismatching number of fields: expect 4, got: %d", llen) } case Modified: if llen != 4 && llen != 3 { - return nil, fmt.Errorf("Mismatching number of fields: expect 3..4, got: %d", llen) + return nil, fmt.Errorf("mismatching number of fields: expect 3..4, got: %d", llen) } default: if llen != 3 { - return nil, fmt.Errorf("Mismatching number of fields: expect 3, got: %d", llen) + return nil, fmt.Errorf("mismatching number of fields: expect 3, got: %d", llen) } } inodeType := inodeTypeMap[line[1]] if inodeType == 0 { - return nil, fmt.Errorf("Unknown inode type '%s'", line[1]) + return nil, fmt.Errorf("unknown inode type '%s'", line[1]) } path, err := unescapeFilepath(line[2]) if err != nil { - return nil, fmt.Errorf("Failed to parse filename: %v", err) + return nil, fmt.Errorf("failed to parse filename: %w", err) } var newPath string @@ -244,13 +237,13 @@ func parseInodeChange(line []string) (*InodeChange, error) { case Renamed: newPath, err = unescapeFilepath(line[3]) if err != nil { - return nil, fmt.Errorf("Failed to parse filename: %v", err) + return nil, fmt.Errorf("failed to parse filename: %w", err) } case Modified: if llen == 4 { referenceCount, err = parseReferenceCount(line[3]) if err != nil { - return nil, fmt.Errorf("Failed to parse reference count: %v", err) + return nil, fmt.Errorf("failed to parse reference count: %w", err) } } default: @@ -266,18 +259,19 @@ func parseInodeChange(line []string) (*InodeChange, error) { }, nil } -// example input -//M / /testpool/bar/ -//+ F /testpool/bar/hello.txt -//M / /testpool/bar/hello.txt (+1) -//M / /testpool/bar/hello-hardlink +// example input for parseInodeChanges +// M / /testpool/bar/ +// + F /testpool/bar/hello.txt +// M / /testpool/bar/hello.txt (+1) +// M / /testpool/bar/hello-hardlink + func parseInodeChanges(lines [][]string) ([]*InodeChange, error) { changes := make([]*InodeChange, len(lines)) for i, line := range lines { c, err := parseInodeChange(line) if err != nil { - return nil, fmt.Errorf("Failed to parse line %d of zfs diff: %v, got: '%s'", i, err, line) + return nil, fmt.Errorf("failed to parse line %d of zfs diff: %w, got: '%s'", i, err, line) } changes[i] = c } @@ -290,7 +284,7 @@ func listByType(t, filter string) ([]*Dataset, error) { if filter != "" { args = append(args, filter) } - out, err := zfs(args...) + out, err := zfsOutput(args...) 
if err != nil { return nil, err } diff --git a/vendor/github.com/mistifyio/go-zfs/v3/utils_notsolaris.go b/vendor/github.com/mistifyio/go-zfs/v3/utils_notsolaris.go new file mode 100644 index 000000000..ef1beac90 --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/v3/utils_notsolaris.go @@ -0,0 +1,19 @@ +//go:build !solaris +// +build !solaris + +package zfs + +import "strings" + +var ( + // List of ZFS properties to retrieve from zfs list command on a non-Solaris platform. + dsPropList = []string{"name", "origin", "used", "available", "mountpoint", "compression", "type", "volsize", "quota", "referenced", "written", "logicalused", "usedbydataset"} + + dsPropListOptions = strings.Join(dsPropList, ",") + + // List of Zpool properties to retrieve from zpool list command on a non-Solaris platform. + zpoolPropList = []string{"name", "health", "allocated", "size", "free", "readonly", "dedupratio", "fragmentation", "freeing", "leaked"} + + zpoolPropListOptions = strings.Join(zpoolPropList, ",") + zpoolArgs = []string{"get", "-p", zpoolPropListOptions} +) diff --git a/vendor/github.com/mistifyio/go-zfs/v3/utils_solaris.go b/vendor/github.com/mistifyio/go-zfs/v3/utils_solaris.go new file mode 100644 index 000000000..c6bf6d87a --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/v3/utils_solaris.go @@ -0,0 +1,19 @@ +//go:build solaris +// +build solaris + +package zfs + +import "strings" + +var ( + // List of ZFS properties to retrieve from zfs list command on a Solaris platform + dsPropList = []string{"name", "origin", "used", "available", "mountpoint", "compression", "type", "volsize", "quota", "referenced"} + + dsPropListOptions = strings.Join(dsPropList, ",") + + // List of Zpool properties to retrieve from zpool list command on a non-Solaris platform + zpoolPropList = []string{"name", "health", "allocated", "size", "free", "readonly", "dedupratio"} + + zpoolPropListOptions = strings.Join(zpoolPropList, ",") + zpoolArgs = []string{"get", "-p", zpoolPropListOptions} +) diff --git a/vendor/github.com/mistifyio/go-zfs/zfs.go b/vendor/github.com/mistifyio/go-zfs/v3/zfs.go similarity index 70% rename from vendor/github.com/mistifyio/go-zfs/zfs.go rename to vendor/github.com/mistifyio/go-zfs/v3/zfs.go index 4e5087ffe..1166bdc21 100644 --- a/vendor/github.com/mistifyio/go-zfs/zfs.go +++ b/vendor/github.com/mistifyio/go-zfs/v3/zfs.go @@ -9,19 +9,18 @@ import ( "strings" ) -// ZFS dataset types, which can indicate if a dataset is a filesystem, -// snapshot, or volume. +// ZFS dataset types, which can indicate if a dataset is a filesystem, snapshot, or volume. const ( DatasetFilesystem = "filesystem" DatasetSnapshot = "snapshot" DatasetVolume = "volume" ) -// Dataset is a ZFS dataset. A dataset could be a clone, filesystem, snapshot, -// or volume. The Type struct member can be used to determine a dataset's type. +// Dataset is a ZFS dataset. A dataset could be a clone, filesystem, snapshot, or volume. +// The Type struct member can be used to determine a dataset's type. // // The field definitions can be found in the ZFS manual: -// http://www.freebsd.org/cgi/man.cgi?zfs(8). +// https://openzfs.github.io/openzfs-docs/man/7/zfsprops.7.html. type Dataset struct { Name string Origin string @@ -38,10 +37,10 @@ type Dataset struct { Referenced uint64 } -// InodeType is the type of inode as reported by Diff +// InodeType is the type of inode as reported by Diff. type InodeType int -// Types of Inodes +// Types of Inodes. 
const ( _ = iota // 0 == unknown type BlockDevice InodeType = iota @@ -55,10 +54,10 @@ const ( File ) -// ChangeType is the type of inode change as reported by Diff +// ChangeType is the type of inode change as reported by Diff. type ChangeType int -// Types of Changes +// Types of Changes. const ( _ = iota // 0 == unknown type Removed ChangeType = iota @@ -67,10 +66,10 @@ const ( Renamed ) -// DestroyFlag is the options flag passed to Destroy +// DestroyFlag is the options flag passed to Destroy. type DestroyFlag int -// Valid destroy options +// Valid destroy options. const ( DestroyDefault DestroyFlag = 1 << iota DestroyRecursive = 1 << iota @@ -79,7 +78,7 @@ const ( DestroyForceUmount = 1 << iota ) -// InodeChange represents a change as reported by Diff +// InodeChange represents a change as reported by Diff. type InodeChange struct { Change ChangeType Type InodeType @@ -88,65 +87,65 @@ type InodeChange struct { ReferenceCountChange int } -// Logger can be used to log commands/actions +// Logger can be used to log commands/actions. type Logger interface { Log(cmd []string) } type defaultLogger struct{} -func (*defaultLogger) Log(cmd []string) { - return +func (*defaultLogger) Log([]string) { } var logger Logger = &defaultLogger{} -// SetLogger set a log handler to log all commands including arguments before -// they are executed +// SetLogger set a log handler to log all commands including arguments before they are executed. func SetLogger(l Logger) { if l != nil { logger = l } } +// zfs is a helper function to wrap typical calls to zfs that ignores stdout. +func zfs(arg ...string) error { + _, err := zfsOutput(arg...) + return err +} + // zfs is a helper function to wrap typical calls to zfs. -func zfs(arg ...string) ([][]string, error) { +func zfsOutput(arg ...string) ([][]string, error) { c := command{Command: "zfs"} return c.Run(arg...) } // Datasets returns a slice of ZFS datasets, regardless of type. -// A filter argument may be passed to select a dataset with the matching name, -// or empty string ("") may be used to select all datasets. +// A filter argument may be passed to select a dataset with the matching name, or empty string ("") may be used to select all datasets. func Datasets(filter string) ([]*Dataset, error) { return listByType("all", filter) } // Snapshots returns a slice of ZFS snapshots. -// A filter argument may be passed to select a snapshot with the matching name, -// or empty string ("") may be used to select all snapshots. +// A filter argument may be passed to select a snapshot with the matching name, or empty string ("") may be used to select all snapshots. func Snapshots(filter string) ([]*Dataset, error) { return listByType(DatasetSnapshot, filter) } // Filesystems returns a slice of ZFS filesystems. -// A filter argument may be passed to select a filesystem with the matching name, -// or empty string ("") may be used to select all filesystems. +// A filter argument may be passed to select a filesystem with the matching name, or empty string ("") may be used to select all filesystems. func Filesystems(filter string) ([]*Dataset, error) { return listByType(DatasetFilesystem, filter) } // Volumes returns a slice of ZFS volumes. -// A filter argument may be passed to select a volume with the matching name, -// or empty string ("") may be used to select all volumes. +// A filter argument may be passed to select a volume with the matching name, or empty string ("") may be used to select all volumes. 
func Volumes(filter string) ([]*Dataset, error) { return listByType(DatasetVolume, filter) } -// GetDataset retrieves a single ZFS dataset by name. This dataset could be -// any valid ZFS dataset type, such as a clone, filesystem, snapshot, or volume. +// GetDataset retrieves a single ZFS dataset by name. +// This dataset could be any valid ZFS dataset type, such as a clone, filesystem, snapshot, or volume. func GetDataset(name string) (*Dataset, error) { - out, err := zfs("list", "-Hp", "-o", dsPropListOptions, name) + out, err := zfsOutput("list", "-Hp", "-o", dsPropListOptions, name) if err != nil { return nil, err } @@ -174,8 +173,7 @@ func (d *Dataset) Clone(dest string, properties map[string]string) (*Dataset, er args = append(args, propsSlice(properties)...) } args = append(args, []string{d.Name, dest}...) - _, err := zfs(args...) - if err != nil { + if err := zfs(args...); err != nil { return nil, err } return GetDataset(dest) @@ -192,8 +190,7 @@ func (d *Dataset) Unmount(force bool) (*Dataset, error) { args = append(args, "-f") } args = append(args, d.Name) - _, err := zfs(args...) - if err != nil { + if err := zfs(args...); err != nil { return nil, err } return GetDataset(d.Name) @@ -214,20 +211,17 @@ func (d *Dataset) Mount(overlay bool, options []string) (*Dataset, error) { args = append(args, strings.Join(options, ",")) } args = append(args, d.Name) - _, err := zfs(args...) - if err != nil { + if err := zfs(args...); err != nil { return nil, err } return GetDataset(d.Name) } -// ReceiveSnapshot receives a ZFS stream from the input io.Reader, creates a -// new snapshot with the specified name, and streams the input data into the -// newly-created snapshot. +// ReceiveSnapshot receives a ZFS stream from the input io.Reader. +// A new snapshot is created with the specified name, and streams the input data into the newly-created snapshot. func ReceiveSnapshot(input io.Reader, name string) (*Dataset, error) { c := command{Command: "zfs", Stdin: input} - _, err := c.Run("receive", name) - if err != nil { + if _, err := c.Run("receive", name); err != nil { return nil, err } return GetDataset(name) @@ -245,10 +239,21 @@ func (d *Dataset) SendSnapshot(output io.Writer) error { return err } -// CreateVolume creates a new ZFS volume with the specified name, size, and -// properties. -// A full list of available ZFS properties may be found here: -// https://www.freebsd.org/cgi/man.cgi?zfs(8). +// IncrementalSend sends a ZFS stream of a snapshot to the input io.Writer using the baseSnapshot as the starting point. +// An error will be returned if the input dataset is not of snapshot type. +func (d *Dataset) IncrementalSend(baseSnapshot *Dataset, output io.Writer) error { + if d.Type != DatasetSnapshot || baseSnapshot.Type != DatasetSnapshot { + return errors.New("can only send snapshots") + } + c := command{Command: "zfs", Stdout: output} + _, err := c.Run("send", "-i", baseSnapshot.Name, d.Name) + return err +} + +// CreateVolume creates a new ZFS volume with the specified name, size, and properties. +// +// A full list of available ZFS properties may be found in the ZFS manual: +// https://openzfs.github.io/openzfs-docs/man/7/zfsprops.7.html. func CreateVolume(name string, size uint64, properties map[string]string) (*Dataset, error) { args := make([]string, 4, 5) args[0] = "create" @@ -259,17 +264,15 @@ func CreateVolume(name string, size uint64, properties map[string]string) (*Data args = append(args, propsSlice(properties)...) } args = append(args, name) - _, err := zfs(args...) 
- if err != nil { + if err := zfs(args...); err != nil { return nil, err } return GetDataset(name) } -// Destroy destroys a ZFS dataset. If the destroy bit flag is set, any -// descendents of the dataset will be recursively destroyed, including snapshots. -// If the deferred bit flag is set, the snapshot is marked for deferred -// deletion. +// Destroy destroys a ZFS dataset. +// If the destroy bit flag is set, any descendents of the dataset will be recursively destroyed, including snapshots. +// If the deferred bit flag is set, the snapshot is marked for deferred deletion. func (d *Dataset) Destroy(flags DestroyFlag) error { args := make([]string, 1, 3) args[0] = "destroy" @@ -290,25 +293,26 @@ func (d *Dataset) Destroy(flags DestroyFlag) error { } args = append(args, d.Name) - _, err := zfs(args...) + err := zfs(args...) return err } // SetProperty sets a ZFS property on the receiving dataset. -// A full list of available ZFS properties may be found here: -// https://www.freebsd.org/cgi/man.cgi?zfs(8). +// +// A full list of available ZFS properties may be found in the ZFS manual: +// https://openzfs.github.io/openzfs-docs/man/7/zfsprops.7.html. func (d *Dataset) SetProperty(key, val string) error { prop := strings.Join([]string{key, val}, "=") - _, err := zfs("set", prop, d.Name) + err := zfs("set", prop, d.Name) return err } -// GetProperty returns the current value of a ZFS property from the -// receiving dataset. -// A full list of available ZFS properties may be found here: -// https://www.freebsd.org/cgi/man.cgi?zfs(8). +// GetProperty returns the current value of a ZFS property from the receiving dataset. +// +// A full list of available ZFS properties may be found in the ZFS manual: +// https://openzfs.github.io/openzfs-docs/man/7/zfsprops.7.html. func (d *Dataset) GetProperty(key string) (string, error) { - out, err := zfs("get", "-H", key, d.Name) + out, err := zfsOutput("get", "-H", key, d.Name) if err != nil { return "", err } @@ -317,7 +321,7 @@ func (d *Dataset) GetProperty(key string) (string, error) { } // Rename renames a dataset. -func (d *Dataset) Rename(name string, createParent bool, recursiveRenameSnapshots bool) (*Dataset, error) { +func (d *Dataset) Rename(name string, createParent, recursiveRenameSnapshots bool) (*Dataset, error) { args := make([]string, 3, 5) args[0] = "rename" args[1] = d.Name @@ -328,8 +332,7 @@ func (d *Dataset) Rename(name string, createParent bool, recursiveRenameSnapshot if recursiveRenameSnapshots { args = append(args, "-r") } - _, err := zfs(args...) - if err != nil { + if err := zfs(args...); err != nil { return d, err } @@ -341,10 +344,10 @@ func (d *Dataset) Snapshots() ([]*Dataset, error) { return Snapshots(d.Name) } -// CreateFilesystem creates a new ZFS filesystem with the specified name and -// properties. -// A full list of available ZFS properties may be found here: -// https://www.freebsd.org/cgi/man.cgi?zfs(8). +// CreateFilesystem creates a new ZFS filesystem with the specified name and properties. +// +// A full list of available ZFS properties may be found in the ZFS manual: +// https://openzfs.github.io/openzfs-docs/man/7/zfsprops.7.html. func CreateFilesystem(name string, properties map[string]string) (*Dataset, error) { args := make([]string, 1, 4) args[0] = "create" @@ -354,16 +357,14 @@ func CreateFilesystem(name string, properties map[string]string) (*Dataset, erro } args = append(args, name) - _, err := zfs(args...) 
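// [Editor's example, not part of the upstream patch.] A property round-trip
// with the helpers above; "compression"/"lz4" are ordinary ZFS property
// values:
func examplePropertyRoundTrip(d *Dataset) (string, error) {
	if err := d.SetProperty("compression", "lz4"); err != nil {
		return "", err
	}
	// GetProperty shells out to `zfs get -H` and returns the value column.
	return d.GetProperty("compression")
}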
- if err != nil { + if err := zfs(args...); err != nil { return nil, err } return GetDataset(name) } -// Snapshot creates a new ZFS snapshot of the receiving dataset, using the -// specified name. Optionally, the snapshot can be taken recursively, creating -// snapshots of all descendent filesystems in a single, atomic operation. +// Snapshot creates a new ZFS snapshot of the receiving dataset, using the specified name. +// Optionally, the snapshot can be taken recursively, creating snapshots of all descendent filesystems in a single, atomic operation. func (d *Dataset) Snapshot(name string, recursive bool) (*Dataset, error) { args := make([]string, 1, 4) args[0] = "snapshot" @@ -372,17 +373,15 @@ func (d *Dataset) Snapshot(name string, recursive bool) (*Dataset, error) { } snapName := fmt.Sprintf("%s@%s", d.Name, name) args = append(args, snapName) - _, err := zfs(args...) - if err != nil { + if err := zfs(args...); err != nil { return nil, err } return GetDataset(snapName) } // Rollback rolls back the receiving ZFS dataset to a previous snapshot. -// Optionally, intermediate snapshots can be destroyed. A ZFS snapshot -// rollback cannot be completed without this option, if more recent -// snapshots exist. +// Optionally, intermediate snapshots can be destroyed. +// A ZFS snapshot rollback cannot be completed without this option, if more recent snapshots exist. // An error will be returned if the input dataset is not of snapshot type. func (d *Dataset) Rollback(destroyMoreRecent bool) error { if d.Type != DatasetSnapshot { @@ -396,13 +395,12 @@ func (d *Dataset) Rollback(destroyMoreRecent bool) error { } args = append(args, d.Name) - _, err := zfs(args...) + err := zfs(args...) return err } // Children returns a slice of children of the receiving ZFS dataset. -// A recursion depth may be specified, or a depth of 0 allows unlimited -// recursion. +// A recursion depth may be specified, or a depth of 0 allows unlimited recursion. func (d *Dataset) Children(depth uint64) ([]*Dataset, error) { args := []string{"list"} if depth > 0 { @@ -414,7 +412,7 @@ func (d *Dataset) Children(depth uint64) ([]*Dataset, error) { args = append(args, "-t", "all", "-Hp", "-o", dsPropListOptions) args = append(args, d.Name) - out, err := zfs(args...) + out, err := zfsOutput(args...) if err != nil { return nil, err } @@ -436,11 +434,10 @@ func (d *Dataset) Children(depth uint64) ([]*Dataset, error) { } // Diff returns changes between a snapshot and the given ZFS dataset. -// The snapshot name must include the filesystem part as it is possible to -// compare clones with their origin snapshots. +// The snapshot name must include the filesystem part as it is possible to compare clones with their origin snapshots. func (d *Dataset) Diff(snapshot string) ([]*InodeChange, error) { - args := []string{"diff", "-FH", snapshot, d.Name}[:] - out, err := zfs(args...) + args := []string{"diff", "-FH", snapshot, d.Name} + out, err := zfsOutput(args...) if err != nil { return nil, err } diff --git a/vendor/github.com/mistifyio/go-zfs/zpool.go b/vendor/github.com/mistifyio/go-zfs/v3/zpool.go similarity index 71% rename from vendor/github.com/mistifyio/go-zfs/zpool.go rename to vendor/github.com/mistifyio/go-zfs/v3/zpool.go index d8db945d7..2f7071305 100644 --- a/vendor/github.com/mistifyio/go-zfs/zpool.go +++ b/vendor/github.com/mistifyio/go-zfs/v3/zpool.go @@ -1,8 +1,9 @@ package zfs -// ZFS zpool states, which can indicate if a pool is online, offline, -// degraded, etc. 
More information regarding zpool states can be found here: -// https://docs.oracle.com/cd/E19253-01/819-5461/gamno/index.html. +// ZFS zpool states, which can indicate if a pool is online, offline, degraded, etc. +// +// More information regarding zpool states can be found in the ZFS manual: +// https://openzfs.github.io/openzfs-docs/man/7/zpoolconcepts.7.html#Device_Failure_and_Recovery const ( ZpoolOnline = "ONLINE" ZpoolDegraded = "DEGRADED" @@ -12,8 +13,8 @@ const ( ZpoolRemoved = "REMOVED" ) -// Zpool is a ZFS zpool. A pool is a top-level structure in ZFS, and can -// contain many descendent datasets. +// Zpool is a ZFS zpool. +// A pool is a top-level structure in ZFS, and can contain many descendent datasets. type Zpool struct { Name string Health string @@ -27,8 +28,14 @@ type Zpool struct { DedupRatio float64 } +// zpool is a helper function to wrap typical calls to zpool and ignores stdout. +func zpool(arg ...string) error { + _, err := zpoolOutput(arg...) + return err +} + // zpool is a helper function to wrap typical calls to zpool. -func zpool(arg ...string) ([][]string, error) { +func zpoolOutput(arg ...string) ([][]string, error) { c := command{Command: "zpool"} return c.Run(arg...) } @@ -37,7 +44,7 @@ func zpool(arg ...string) ([][]string, error) { func GetZpool(name string) (*Zpool, error) { args := zpoolArgs args = append(args, name) - out, err := zpool(args...) + out, err := zpoolOutput(args...) if err != nil { return nil, err } @@ -65,10 +72,11 @@ func (z *Zpool) Snapshots() ([]*Dataset, error) { return Snapshots(z.Name) } -// CreateZpool creates a new ZFS zpool with the specified name, properties, -// and optional arguments. -// A full list of available ZFS properties and command-line arguments may be -// found here: https://www.freebsd.org/cgi/man.cgi?zfs(8). +// CreateZpool creates a new ZFS zpool with the specified name, properties, and optional arguments. +// +// A full list of available ZFS properties and command-line arguments may be found in the ZFS manual: +// https://openzfs.github.io/openzfs-docs/man/7/zfsprops.7.html. +// https://openzfs.github.io/openzfs-docs/man/8/zpool-create.8.html func CreateZpool(name string, properties map[string]string, args ...string) (*Zpool, error) { cli := make([]string, 1, 4) cli[0] = "create" @@ -77,8 +85,7 @@ func CreateZpool(name string, properties map[string]string, args ...string) (*Zp } cli = append(cli, name) cli = append(cli, args...) - _, err := zpool(cli...) - if err != nil { + if err := zpool(cli...); err != nil { return nil, err } @@ -87,14 +94,14 @@ func CreateZpool(name string, properties map[string]string, args ...string) (*Zp // Destroy destroys a ZFS zpool by name. func (z *Zpool) Destroy() error { - _, err := zpool("destroy", z.Name) + err := zpool("destroy", z.Name) return err } // ListZpools list all ZFS zpools accessible on the current system. func ListZpools() ([]*Zpool, error) { args := []string{"list", "-Ho", "name"} - out, err := zpool(args...) + out, err := zpoolOutput(args...) 
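// [Editor's example, not part of the upstream patch.] CreateZpool forwards
// its trailing args verbatim to `zpool create`; the device paths below are
// hypothetical:
func exampleCreatePool() (*Zpool, error) {
	props := map[string]string{"ashift": "12"}
	// Equivalent to: zpool create -o ashift=12 tank mirror /dev/sda /dev/sdb
	return CreateZpool("tank", props, "mirror", "/dev/sda", "/dev/sdb")
}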
if err != nil { return nil, err } diff --git a/vendor/github.com/moby/sys/mountinfo/go.mod b/vendor/github.com/moby/sys/mountinfo/go.mod index 9749ea96d..e1bcdfe79 100644 --- a/vendor/github.com/moby/sys/mountinfo/go.mod +++ b/vendor/github.com/moby/sys/mountinfo/go.mod @@ -1,5 +1,5 @@ module github.com/moby/sys/mountinfo -go 1.14 +go 1.16 -require golang.org/x/sys v0.0.0-20200909081042-eff7692f9009 +require golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a diff --git a/vendor/github.com/moby/sys/mountinfo/go.sum b/vendor/github.com/moby/sys/mountinfo/go.sum index 2a5be7ea8..af14a66ec 100644 --- a/vendor/github.com/moby/sys/mountinfo/go.sum +++ b/vendor/github.com/moby/sys/mountinfo/go.sum @@ -1,2 +1,2 @@ -golang.org/x/sys v0.0.0-20200909081042-eff7692f9009 h1:W0lCpv29Hv0UaM1LXb9QlBHLNP8UFfcKjblhVCWftOM= -golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/vendor/github.com/moby/sys/mountinfo/mounted_linux.go b/vendor/github.com/moby/sys/mountinfo/mounted_linux.go index bc9f6b2ad..e78e72619 100644 --- a/vendor/github.com/moby/sys/mountinfo/mounted_linux.go +++ b/vendor/github.com/moby/sys/mountinfo/mounted_linux.go @@ -7,6 +7,34 @@ import ( "golang.org/x/sys/unix" ) +// MountedFast is a method of detecting a mount point without reading +// mountinfo from procfs. A caller can only trust the result if no error +// and sure == true are returned. Otherwise, other methods (e.g. parsing +// /proc/mounts) have to be used. If unsure, use Mounted instead (which +// uses MountedFast, but falls back to parsing mountinfo if needed). +// +// If a non-existent path is specified, an appropriate error is returned. +// In case the caller is not interested in this particular error, it should +// be handled separately using e.g. errors.Is(err, fs.ErrNotExist). +// +// This function is only available on Linux. When available (since kernel +// v5.6), openat2(2) syscall is used to reliably detect all mounts. Otherwise, +// the implementation falls back to using stat(2), which can reliably detect +// normal (but not bind) mounts. +func MountedFast(path string) (mounted, sure bool, err error) { + // Root is always mounted. + if path == string(os.PathSeparator) { + return true, true, nil + } + + path, err = normalizePath(path) + if err != nil { + return false, false, err + } + mounted, sure, err = mountedFast(path) + return +} + // mountedByOpenat2 is a method of detecting a mount that works for all kinds // of mounts (incl. bind mounts), but requires a recent (v5.6+) linux kernel. 
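// [Editor's example, illustrative only.] MountedFast's contract is: trust
// the result only when sure == true and err == nil; otherwise fall back to
// Mounted. "/mnt/data" is a hypothetical path:
package main

import (
	"errors"
	"fmt"
	"io/fs"

	"github.com/moby/sys/mountinfo"
)

func main() {
	mounted, sure, err := mountinfo.MountedFast("/mnt/data")
	switch {
	case errors.Is(err, fs.ErrNotExist):
		fmt.Println("no such path")
	case err != nil:
		fmt.Println("error:", err)
	case sure:
		fmt.Println("mounted:", mounted)
	default:
		// Unsure (e.g. a possible bind mount on a pre-5.6 kernel):
		// Mounted re-runs the fast paths, then parses mountinfo.
		mounted, err = mountinfo.Mounted("/mnt/data")
		fmt.Println("mounted (slow path):", mounted, err)
	}
}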
func mountedByOpenat2(path string) (bool, error) { @@ -16,9 +44,6 @@ func mountedByOpenat2(path string) (bool, error) { Flags: unix.O_PATH | unix.O_CLOEXEC, }) if err != nil { - if err == unix.ENOENT { // not a mount - return false, nil - } return false, &os.PathError{Op: "openat2", Path: dir, Err: err} } fd, err := unix.Openat2(dirfd, last, &unix.OpenHow{ @@ -26,33 +51,51 @@ func mountedByOpenat2(path string) (bool, error) { Resolve: unix.RESOLVE_NO_XDEV, }) _ = unix.Close(dirfd) - switch err { + switch err { //nolint:errorlint // unix errors are bare case nil: // definitely not a mount _ = unix.Close(fd) return false, nil case unix.EXDEV: // definitely a mount return true, nil - case unix.ENOENT: // not a mount - return false, nil } // not sure return false, &os.PathError{Op: "openat2", Path: path, Err: err} } -func mounted(path string) (bool, error) { +// mountedFast is similar to MountedFast, except it expects a normalized path. +func mountedFast(path string) (mounted, sure bool, err error) { + // Root is always mounted. + if path == string(os.PathSeparator) { + return true, true, nil + } + // Try a fast path, using openat2() with RESOLVE_NO_XDEV. - mounted, err := mountedByOpenat2(path) + mounted, err = mountedByOpenat2(path) if err == nil { - return mounted, nil + return mounted, true, nil } + // Another fast path: compare st.st_dev fields. mounted, err = mountedByStat(path) // This does not work for bind mounts, so false negative // is possible, therefore only trust if return is true. if mounted && err == nil { + return true, true, nil + } + + return +} + +func mounted(path string) (bool, error) { + path, err := normalizePath(path) + if err != nil { + return false, err + } + mounted, sure, err := mountedFast(path) + if sure && err == nil { return mounted, nil } - // Fallback to parsing mountinfo + // Fallback to parsing mountinfo. return mountedByMountinfo(path) } diff --git a/vendor/github.com/moby/sys/mountinfo/mounted_unix.go b/vendor/github.com/moby/sys/mountinfo/mounted_unix.go index efb03978b..c7b7678f9 100644 --- a/vendor/github.com/moby/sys/mountinfo/mounted_unix.go +++ b/vendor/github.com/moby/sys/mountinfo/mounted_unix.go @@ -1,10 +1,9 @@ -// +build linux freebsd,cgo openbsd,cgo +//go:build linux || freebsd || openbsd || darwin +// +build linux freebsd openbsd darwin package mountinfo import ( - "errors" - "fmt" "os" "path/filepath" @@ -15,10 +14,6 @@ func mountedByStat(path string) (bool, error) { var st unix.Stat_t if err := unix.Lstat(path, &st); err != nil { - if err == unix.ENOENT { - // Treat ENOENT as "not mounted". 
- return false, nil - } return false, &os.PathError{Op: "stat", Path: path, Err: err} } dev := st.Dev @@ -37,26 +32,18 @@ func mountedByStat(path string) (bool, error) { func normalizePath(path string) (realPath string, err error) { if realPath, err = filepath.Abs(path); err != nil { - return "", fmt.Errorf("unable to get absolute path for %q: %w", path, err) + return "", err } if realPath, err = filepath.EvalSymlinks(realPath); err != nil { - return "", fmt.Errorf("failed to canonicalise path for %q: %w", path, err) + return "", err } if _, err := os.Stat(realPath); err != nil { - return "", fmt.Errorf("failed to stat target of %q: %w", path, err) + return "", err } return realPath, nil } func mountedByMountinfo(path string) (bool, error) { - path, err := normalizePath(path) - if err != nil { - if errors.Is(err, unix.ENOENT) { - // treat ENOENT as "not mounted" - return false, nil - } - return false, err - } entries, err := GetMounts(SingleEntryFilter(path)) if err != nil { return false, err diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo.go b/vendor/github.com/moby/sys/mountinfo/mountinfo.go index 403a89331..574aeb876 100644 --- a/vendor/github.com/moby/sys/mountinfo/mountinfo.go +++ b/vendor/github.com/moby/sys/mountinfo/mountinfo.go @@ -10,11 +10,12 @@ func GetMounts(f FilterFunc) ([]*Info, error) { return parseMountTable(f) } -// Mounted determines if a specified path is a mount point. +// Mounted determines if a specified path is a mount point. In case of any +// error, false (and an error) is returned. // -// The argument must be an absolute path, with all symlinks resolved, and clean. -// One way to ensure it is to process the path using filepath.Abs followed by -// filepath.EvalSymlinks before calling this function. +// If a non-existent path is specified, an appropriate error is returned. +// In case the caller is not interested in this particular error, it should +// be handled separately using e.g. errors.Is(err, fs.ErrNotExist). 
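// [Editor's sketch of the technique above, not actual package code.] The
// stat fast path compares st.Dev of the path and its parent: a differing
// device ID implies a mountpoint. Bind mounts of the same filesystem keep
// the same device, which is why only a true result is trusted:
func devDiffers(path string) (bool, error) {
	var st, pst unix.Stat_t
	if err := unix.Lstat(path, &st); err != nil {
		return false, err
	}
	if err := unix.Lstat(filepath.Dir(path), &pst); err != nil {
		return false, err
	}
	return st.Dev != pst.Dev, nil
}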
func Mounted(path string) (bool, error) { // root is always mounted if path == string(os.PathSeparator) { diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo_bsd.go b/vendor/github.com/moby/sys/mountinfo/mountinfo_bsd.go index b1c12d02b..8420f58c7 100644 --- a/vendor/github.com/moby/sys/mountinfo/mountinfo_bsd.go +++ b/vendor/github.com/moby/sys/mountinfo/mountinfo_bsd.go @@ -1,52 +1,37 @@ -// +build freebsd,cgo openbsd,cgo +//go:build freebsd || openbsd || darwin +// +build freebsd openbsd darwin package mountinfo -/* -#include -#include -#include -*/ -import "C" - -import ( - "fmt" - "reflect" - "unsafe" -) +import "golang.org/x/sys/unix" // parseMountTable returns information about mounted filesystems func parseMountTable(filter FilterFunc) ([]*Info, error) { - var rawEntries *C.struct_statfs - - count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT)) - if count == 0 { - return nil, fmt.Errorf("Failed to call getmntinfo") + count, err := unix.Getfsstat(nil, unix.MNT_WAIT) + if err != nil { + return nil, err } - var entries []C.struct_statfs - header := (*reflect.SliceHeader)(unsafe.Pointer(&entries)) - header.Cap = count - header.Len = count - header.Data = uintptr(unsafe.Pointer(rawEntries)) + entries := make([]unix.Statfs_t, count) + _, err = unix.Getfsstat(entries, unix.MNT_WAIT) + if err != nil { + return nil, err + } var out []*Info for _, entry := range entries { - var mountinfo Info var skip, stop bool - mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0]) - mountinfo.FSType = C.GoString(&entry.f_fstypename[0]) - mountinfo.Source = C.GoString(&entry.f_mntfromname[0]) + mountinfo := getMountinfo(&entry) if filter != nil { // filter out entries we're not interested in - skip, stop = filter(&mountinfo) + skip, stop = filter(mountinfo) if skip { continue } } - out = append(out, &mountinfo) + out = append(out, mountinfo) if stop { break } @@ -55,6 +40,10 @@ func parseMountTable(filter FilterFunc) ([]*Info, error) { } func mounted(path string) (bool, error) { + path, err := normalizePath(path) + if err != nil { + return false, err + } // Fast path: compare st.st_dev fields. // This should always work for FreeBSD and OpenBSD. 
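// [Editor's example, not part of the upstream patch.] Filters compose with
// GetMounts; SingleEntryFilter is used above, and FSTypeFilter is another
// stock filter in this package:
package main

import (
	"fmt"

	"github.com/moby/sys/mountinfo"
)

func main() {
	// List only overlay mounts.
	mounts, err := mountinfo.GetMounts(mountinfo.FSTypeFilter("overlay"))
	if err != nil {
		panic(err)
	}
	for _, m := range mounts {
		fmt.Println(m.Mountpoint, m.Source)
	}
}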
mounted, err := mountedByStat(path) diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo_freebsdlike.go b/vendor/github.com/moby/sys/mountinfo/mountinfo_freebsdlike.go new file mode 100644 index 000000000..ecaaa7a9c --- /dev/null +++ b/vendor/github.com/moby/sys/mountinfo/mountinfo_freebsdlike.go @@ -0,0 +1,14 @@ +//go:build freebsd || darwin +// +build freebsd darwin + +package mountinfo + +import "golang.org/x/sys/unix" + +func getMountinfo(entry *unix.Statfs_t) *Info { + return &Info{ + Mountpoint: unix.ByteSliceToString(entry.Mntonname[:]), + FSType: unix.ByteSliceToString(entry.Fstypename[:]), + Source: unix.ByteSliceToString(entry.Mntfromname[:]), + } +} diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go b/vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go index f09a70fa0..59332b07b 100644 --- a/vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go +++ b/vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go @@ -52,7 +52,7 @@ func GetMountsFromReader(r io.Reader, filter FilterFunc) ([]*Info, error) { numFields := len(fields) if numFields < 10 { // should be at least 10 fields - return nil, fmt.Errorf("Parsing '%s' failed: not enough fields (%d)", text, numFields) + return nil, fmt.Errorf("parsing '%s' failed: not enough fields (%d)", text, numFields) } // separator field @@ -67,7 +67,7 @@ func GetMountsFromReader(r io.Reader, filter FilterFunc) ([]*Info, error) { for fields[sepIdx] != "-" { sepIdx-- if sepIdx == 5 { - return nil, fmt.Errorf("Parsing '%s' failed: missing - separator", text) + return nil, fmt.Errorf("parsing '%s' failed: missing - separator", text) } } @@ -75,46 +75,39 @@ func GetMountsFromReader(r io.Reader, filter FilterFunc) ([]*Info, error) { p.Mountpoint, err = unescape(fields[4]) if err != nil { - return nil, fmt.Errorf("Parsing '%s' failed: mount point: %w", fields[4], err) + return nil, fmt.Errorf("parsing '%s' failed: mount point: %w", fields[4], err) } p.FSType, err = unescape(fields[sepIdx+1]) if err != nil { - return nil, fmt.Errorf("Parsing '%s' failed: fstype: %w", fields[sepIdx+1], err) + return nil, fmt.Errorf("parsing '%s' failed: fstype: %w", fields[sepIdx+1], err) } p.Source, err = unescape(fields[sepIdx+2]) if err != nil { - return nil, fmt.Errorf("Parsing '%s' failed: source: %w", fields[sepIdx+2], err) + return nil, fmt.Errorf("parsing '%s' failed: source: %w", fields[sepIdx+2], err) } p.VFSOptions = fields[sepIdx+3] // ignore any numbers parsing errors, as there should not be any p.ID, _ = strconv.Atoi(fields[0]) p.Parent, _ = strconv.Atoi(fields[1]) - mm := strings.Split(fields[2], ":") + mm := strings.SplitN(fields[2], ":", 3) if len(mm) != 2 { - return nil, fmt.Errorf("Parsing '%s' failed: unexpected minor:major pair %s", text, mm) + return nil, fmt.Errorf("parsing '%s' failed: unexpected major:minor pair %s", text, mm) } p.Major, _ = strconv.Atoi(mm[0]) p.Minor, _ = strconv.Atoi(mm[1]) p.Root, err = unescape(fields[3]) if err != nil { - return nil, fmt.Errorf("Parsing '%s' failed: root: %w", fields[3], err) + return nil, fmt.Errorf("parsing '%s' failed: root: %w", fields[3], err) } p.Options = fields[5] // zero or more optional fields - switch { - case sepIdx == 6: - // zero, do nothing - case sepIdx == 7: - p.Optional = fields[6] - default: - p.Optional = strings.Join(fields[6:sepIdx-1], " ") - } + p.Optional = strings.Join(fields[6:sepIdx], " ") - // Run the filter after parsing all of the fields. + // Run the filter after parsing all fields. 
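// [Editor's example, not part of the upstream patch.] The parser can be
// exercised directly with the sample line from proc(5); with the fixes
// above, Optional joins fields[6:sepIdx] whether there are zero, one, or
// several optional fields:
package main

import (
	"fmt"
	"strings"

	"github.com/moby/sys/mountinfo"
)

func main() {
	line := "36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue\n"
	infos, err := mountinfo.GetMountsFromReader(strings.NewReader(line), nil)
	if err != nil {
		panic(err)
	}
	m := infos[0]
	fmt.Println(m.Mountpoint, m.FSType, m.Optional) // /mnt2 ext3 master:1
}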
var skip, stop bool if filter != nil { skip, stop = filter(p) diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo_openbsd.go b/vendor/github.com/moby/sys/mountinfo/mountinfo_openbsd.go new file mode 100644 index 000000000..f682c2d3b --- /dev/null +++ b/vendor/github.com/moby/sys/mountinfo/mountinfo_openbsd.go @@ -0,0 +1,11 @@ +package mountinfo + +import "golang.org/x/sys/unix" + +func getMountinfo(entry *unix.Statfs_t) *Info { + return &Info{ + Mountpoint: unix.ByteSliceToString(entry.F_mntonname[:]), + FSType: unix.ByteSliceToString(entry.F_fstypename[:]), + Source: unix.ByteSliceToString(entry.F_mntfromname[:]), + } +} diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go b/vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go index d33ebca09..c2e64bc81 100644 --- a/vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go +++ b/vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go @@ -1,4 +1,5 @@ -// +build !windows,!linux,!freebsd,!openbsd freebsd,!cgo openbsd,!cgo +//go:build !windows && !linux && !freebsd && !openbsd && !darwin +// +build !windows,!linux,!freebsd,!openbsd,!darwin package mountinfo diff --git a/vendor/github.com/modern-go/reflect2/.travis.yml b/vendor/github.com/modern-go/reflect2/.travis.yml index fbb43744d..b097728db 100644 --- a/vendor/github.com/modern-go/reflect2/.travis.yml +++ b/vendor/github.com/modern-go/reflect2/.travis.yml @@ -1,7 +1,7 @@ language: go go: - - 1.8.x + - 1.9.x - 1.x before_install: diff --git a/vendor/github.com/modern-go/reflect2/Gopkg.lock b/vendor/github.com/modern-go/reflect2/Gopkg.lock index 2a3a69893..10ef81118 100644 --- a/vendor/github.com/modern-go/reflect2/Gopkg.lock +++ b/vendor/github.com/modern-go/reflect2/Gopkg.lock @@ -1,15 +1,9 @@ # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. -[[projects]] - name = "github.com/modern-go/concurrent" - packages = ["."] - revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a" - version = "1.0.0" - [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "daee8a88b3498b61c5640056665b8b9eea062006f5e596bbb6a3ed9119a11ec7" + input-imports = [] solver-name = "gps-cdcl" solver-version = 1 diff --git a/vendor/github.com/modern-go/reflect2/Gopkg.toml b/vendor/github.com/modern-go/reflect2/Gopkg.toml index 2f4f4dbdc..a9bc5061b 100644 --- a/vendor/github.com/modern-go/reflect2/Gopkg.toml +++ b/vendor/github.com/modern-go/reflect2/Gopkg.toml @@ -26,10 +26,6 @@ ignored = [] -[[constraint]] - name = "github.com/modern-go/concurrent" - version = "1.0.0" - [prune] go-tests = true unused-packages = true diff --git a/vendor/github.com/modern-go/reflect2/go.mod b/vendor/github.com/modern-go/reflect2/go.mod new file mode 100644 index 000000000..9057e9b33 --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/go.mod @@ -0,0 +1,3 @@ +module github.com/modern-go/reflect2 + +go 1.12 diff --git a/vendor/github.com/modern-go/reflect2/go_above_118.go b/vendor/github.com/modern-go/reflect2/go_above_118.go new file mode 100644 index 000000000..2b4116f6c --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/go_above_118.go @@ -0,0 +1,23 @@ +//+build go1.18 + +package reflect2 + +import ( + "unsafe" +) + +// m escapes into the return value, but the caller of mapiterinit +// doesn't let the return value escape. 
+//go:noescape +//go:linkname mapiterinit reflect.mapiterinit +func mapiterinit(rtype unsafe.Pointer, m unsafe.Pointer, it *hiter) + +func (type2 *UnsafeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator { + var it hiter + mapiterinit(type2.rtype, *(*unsafe.Pointer)(obj), &it) + return &UnsafeMapIterator{ + hiter: &it, + pKeyRType: type2.pKeyRType, + pElemRType: type2.pElemRType, + } +} \ No newline at end of file diff --git a/vendor/github.com/modern-go/reflect2/go_above_17.go b/vendor/github.com/modern-go/reflect2/go_above_17.go deleted file mode 100644 index 5c1cea868..000000000 --- a/vendor/github.com/modern-go/reflect2/go_above_17.go +++ /dev/null @@ -1,8 +0,0 @@ -//+build go1.7 - -package reflect2 - -import "unsafe" - -//go:linkname resolveTypeOff reflect.resolveTypeOff -func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer diff --git a/vendor/github.com/modern-go/reflect2/go_above_19.go b/vendor/github.com/modern-go/reflect2/go_above_19.go index c7e3b7801..974f7685e 100644 --- a/vendor/github.com/modern-go/reflect2/go_above_19.go +++ b/vendor/github.com/modern-go/reflect2/go_above_19.go @@ -6,6 +6,9 @@ import ( "unsafe" ) +//go:linkname resolveTypeOff reflect.resolveTypeOff +func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer + //go:linkname makemap reflect.makemap func makemap(rtype unsafe.Pointer, cap int) (m unsafe.Pointer) diff --git a/vendor/github.com/modern-go/reflect2/go_below_118.go b/vendor/github.com/modern-go/reflect2/go_below_118.go new file mode 100644 index 000000000..00003dbd7 --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/go_below_118.go @@ -0,0 +1,21 @@ +//+build !go1.18 + +package reflect2 + +import ( + "unsafe" +) + +// m escapes into the return value, but the caller of mapiterinit +// doesn't let the return value escape. 
+//go:noescape +//go:linkname mapiterinit reflect.mapiterinit +func mapiterinit(rtype unsafe.Pointer, m unsafe.Pointer) (val *hiter) + +func (type2 *UnsafeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator { + return &UnsafeMapIterator{ + hiter: mapiterinit(type2.rtype, *(*unsafe.Pointer)(obj)), + pKeyRType: type2.pKeyRType, + pElemRType: type2.pElemRType, + } +} \ No newline at end of file diff --git a/vendor/github.com/modern-go/reflect2/go_below_17.go b/vendor/github.com/modern-go/reflect2/go_below_17.go deleted file mode 100644 index 65a93c889..000000000 --- a/vendor/github.com/modern-go/reflect2/go_below_17.go +++ /dev/null @@ -1,9 +0,0 @@ -//+build !go1.7 - -package reflect2 - -import "unsafe" - -func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer { - return nil -} diff --git a/vendor/github.com/modern-go/reflect2/go_below_19.go b/vendor/github.com/modern-go/reflect2/go_below_19.go deleted file mode 100644 index b050ef70c..000000000 --- a/vendor/github.com/modern-go/reflect2/go_below_19.go +++ /dev/null @@ -1,14 +0,0 @@ -//+build !go1.9 - -package reflect2 - -import ( - "unsafe" -) - -//go:linkname makemap reflect.makemap -func makemap(rtype unsafe.Pointer) (m unsafe.Pointer) - -func makeMapWithSize(rtype unsafe.Pointer, cap int) unsafe.Pointer { - return makemap(rtype) -} diff --git a/vendor/github.com/modern-go/reflect2/reflect2.go b/vendor/github.com/modern-go/reflect2/reflect2.go index 63b49c799..c43c8b9d6 100644 --- a/vendor/github.com/modern-go/reflect2/reflect2.go +++ b/vendor/github.com/modern-go/reflect2/reflect2.go @@ -1,8 +1,9 @@ package reflect2 import ( - "github.com/modern-go/concurrent" "reflect" + "runtime" + "sync" "unsafe" ) @@ -130,13 +131,13 @@ var ConfigSafe = Config{UseSafeImplementation: true}.Froze() type frozenConfig struct { useSafeImplementation bool - cache *concurrent.Map + cache *sync.Map } func (cfg Config) Froze() *frozenConfig { return &frozenConfig{ useSafeImplementation: cfg.UseSafeImplementation, - cache: concurrent.NewMap(), + cache: new(sync.Map), } } @@ -288,11 +289,12 @@ func NoEscape(p unsafe.Pointer) unsafe.Pointer { } func UnsafeCastString(str string) []byte { + bytes := make([]byte, 0) stringHeader := (*reflect.StringHeader)(unsafe.Pointer(&str)) - sliceHeader := &reflect.SliceHeader{ - Data: stringHeader.Data, - Cap: stringHeader.Len, - Len: stringHeader.Len, - } - return *(*[]byte)(unsafe.Pointer(sliceHeader)) + sliceHeader := (*reflect.SliceHeader)(unsafe.Pointer(&bytes)) + sliceHeader.Data = stringHeader.Data + sliceHeader.Cap = stringHeader.Len + sliceHeader.Len = stringHeader.Len + runtime.KeepAlive(str) + return bytes } diff --git a/vendor/github.com/modern-go/reflect2/test.sh b/vendor/github.com/modern-go/reflect2/test.sh deleted file mode 100644 index 3d2b9768c..000000000 --- a/vendor/github.com/modern-go/reflect2/test.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash - -set -e -echo "" > coverage.txt - -for d in $(go list github.com/modern-go/reflect2-tests/... 
| grep -v vendor); do - go test -coverprofile=profile.out -coverpkg=github.com/modern-go/reflect2 $d - if [ -f profile.out ]; then - cat profile.out >> coverage.txt - rm profile.out - fi -done diff --git a/vendor/github.com/modern-go/reflect2/type_map.go b/vendor/github.com/modern-go/reflect2/type_map.go index 3acfb5580..4b13c3155 100644 --- a/vendor/github.com/modern-go/reflect2/type_map.go +++ b/vendor/github.com/modern-go/reflect2/type_map.go @@ -1,17 +1,13 @@ +// +build !gccgo + package reflect2 import ( "reflect" - "runtime" - "strings" "sync" "unsafe" ) -// typelinks1 for 1.5 ~ 1.6 -//go:linkname typelinks1 reflect.typelinks -func typelinks1() [][]unsafe.Pointer - // typelinks2 for 1.7 ~ //go:linkname typelinks2 reflect.typelinks func typelinks2() (sections []unsafe.Pointer, offset [][]int32) @@ -27,49 +23,10 @@ func discoverTypes() { types = make(map[string]reflect.Type) packages = make(map[string]map[string]reflect.Type) - ver := runtime.Version() - if ver == "go1.5" || strings.HasPrefix(ver, "go1.5.") { - loadGo15Types() - } else if ver == "go1.6" || strings.HasPrefix(ver, "go1.6.") { - loadGo15Types() - } else { - loadGo17Types() - } -} - -func loadGo15Types() { - var obj interface{} = reflect.TypeOf(0) - typePtrss := typelinks1() - for _, typePtrs := range typePtrss { - for _, typePtr := range typePtrs { - (*emptyInterface)(unsafe.Pointer(&obj)).word = typePtr - typ := obj.(reflect.Type) - if typ.Kind() == reflect.Ptr && typ.Elem().Kind() == reflect.Struct { - loadedType := typ.Elem() - pkgTypes := packages[loadedType.PkgPath()] - if pkgTypes == nil { - pkgTypes = map[string]reflect.Type{} - packages[loadedType.PkgPath()] = pkgTypes - } - types[loadedType.String()] = loadedType - pkgTypes[loadedType.Name()] = loadedType - } - if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Ptr && - typ.Elem().Elem().Kind() == reflect.Struct { - loadedType := typ.Elem().Elem() - pkgTypes := packages[loadedType.PkgPath()] - if pkgTypes == nil { - pkgTypes = map[string]reflect.Type{} - packages[loadedType.PkgPath()] = pkgTypes - } - types[loadedType.String()] = loadedType - pkgTypes[loadedType.Name()] = loadedType - } - } - } + loadGoTypes() } -func loadGo17Types() { +func loadGoTypes() { var obj interface{} = reflect.TypeOf(0) sections, offset := typelinks2() for i, offs := range offset { diff --git a/vendor/github.com/modern-go/reflect2/unsafe_link.go b/vendor/github.com/modern-go/reflect2/unsafe_link.go index 57229c8db..b49f614ef 100644 --- a/vendor/github.com/modern-go/reflect2/unsafe_link.go +++ b/vendor/github.com/modern-go/reflect2/unsafe_link.go @@ -19,18 +19,12 @@ func typedslicecopy(elemType unsafe.Pointer, dst, src sliceHeader) int //go:linkname mapassign reflect.mapassign //go:noescape -func mapassign(rtype unsafe.Pointer, m unsafe.Pointer, key, val unsafe.Pointer) +func mapassign(rtype unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer, val unsafe.Pointer) //go:linkname mapaccess reflect.mapaccess //go:noescape func mapaccess(rtype unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer) (val unsafe.Pointer) -// m escapes into the return value, but the caller of mapiterinit -// doesn't let the return value escape. 
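// [Editor's note on the UnsafeCastString rewrite above.] The new version
// keeps the zero-copy string-to-[]byte cast but adds runtime.KeepAlive(str)
// so the garbage collector cannot reclaim the backing array while the slice
// header is being built. On Go 1.20+ the same idea can be written without
// reflect.SliceHeader; an editor's sketch, not part of this patch:
func unsafeCastString(str string) []byte {
	// The caller still must not write through the result: the bytes are
	// the string's own backing array.
	return unsafe.Slice(unsafe.StringData(str), len(str))
}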
-//go:noescape -//go:linkname mapiterinit reflect.mapiterinit -func mapiterinit(rtype unsafe.Pointer, m unsafe.Pointer) *hiter - //go:noescape //go:linkname mapiternext reflect.mapiternext func mapiternext(it *hiter) @@ -42,9 +36,21 @@ func ifaceE2I(rtype unsafe.Pointer, src interface{}, dst unsafe.Pointer) // If you modify hiter, also change cmd/internal/gc/reflect.go to indicate // the layout of this structure. type hiter struct { - key unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/internal/gc/range.go). - value unsafe.Pointer // Must be in second position (see cmd/internal/gc/range.go). - // rest fields are ignored + key unsafe.Pointer + value unsafe.Pointer + t unsafe.Pointer + h unsafe.Pointer + buckets unsafe.Pointer + bptr unsafe.Pointer + overflow *[]unsafe.Pointer + oldoverflow *[]unsafe.Pointer + startBucket uintptr + offset uint8 + wrapped bool + B uint8 + i uint8 + bucket uintptr + checkBucket uintptr } // add returns p+x. diff --git a/vendor/github.com/modern-go/reflect2/unsafe_map.go b/vendor/github.com/modern-go/reflect2/unsafe_map.go index f2e76e6bb..37872da81 100644 --- a/vendor/github.com/modern-go/reflect2/unsafe_map.go +++ b/vendor/github.com/modern-go/reflect2/unsafe_map.go @@ -107,14 +107,6 @@ func (type2 *UnsafeMapType) Iterate(obj interface{}) MapIterator { return type2.UnsafeIterate(objEFace.data) } -func (type2 *UnsafeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator { - return &UnsafeMapIterator{ - hiter: mapiterinit(type2.rtype, *(*unsafe.Pointer)(obj)), - pKeyRType: type2.pKeyRType, - pElemRType: type2.pElemRType, - } -} - type UnsafeMapIterator struct { *hiter pKeyRType unsafe.Pointer diff --git a/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor.go b/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor.go new file mode 100644 index 000000000..4b03d4c71 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor.go @@ -0,0 +1,16 @@ +package apparmor + +import "errors" + +var ( + // IsEnabled returns true if apparmor is enabled for the host. + IsEnabled = isEnabled + + // ApplyProfile will apply the profile with the specified name to the process after + // the next exec. It is only supported on Linux and produces an ErrApparmorNotEnabled + // on other platforms. + ApplyProfile = applyProfile + + // ErrApparmorNotEnabled indicates that AppArmor is not enabled or not supported. + ErrApparmorNotEnabled = errors.New("apparmor: config provided but apparmor not supported") +) diff --git a/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_linux.go index 5da14fb3b..8b1483c7d 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_linux.go @@ -3,7 +3,6 @@ package apparmor import ( "errors" "fmt" - "io/ioutil" "os" "sync" @@ -15,11 +14,11 @@ var ( checkAppArmor sync.Once ) -// IsEnabled returns true if apparmor is enabled for the host. -func IsEnabled() bool { +// isEnabled returns true if apparmor is enabled for the host. 
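// [Editor's example, not part of the upstream patch.] The new apparmor.go
// exports IsEnabled and ApplyProfile as variables bound to per-GOOS
// implementations, so callers need no build tags. Typical use
// ("docker-default" is an illustrative profile name):
package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/apparmor"
)

func main() {
	if !apparmor.IsEnabled() {
		fmt.Println("AppArmor unavailable; continuing unconfined")
		return
	}
	// The profile takes effect for this process after the next exec.
	if err := apparmor.ApplyProfile("docker-default"); err != nil {
		fmt.Println("apply failed:", err)
	}
}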
+func isEnabled() bool { checkAppArmor.Do(func() { if _, err := os.Stat("/sys/kernel/security/apparmor"); err == nil { - buf, err := ioutil.ReadFile("/sys/module/apparmor/parameters/enabled") + buf, err := os.ReadFile("/sys/module/apparmor/parameters/enabled") appArmorEnabled = err == nil && len(buf) > 1 && buf[0] == 'Y' } }) @@ -52,14 +51,15 @@ func setProcAttr(attr, value string) error { // changeOnExec reimplements aa_change_onexec from libapparmor in Go func changeOnExec(name string) error { if err := setProcAttr("exec", "exec "+name); err != nil { - return fmt.Errorf("apparmor failed to apply profile: %s", err) + return fmt.Errorf("apparmor failed to apply profile: %w", err) } return nil } -// ApplyProfile will apply the profile with the specified name to the process after -// the next exec. -func ApplyProfile(name string) error { +// applyProfile will apply the profile with the specified name to the process after +// the next exec. It is only supported on Linux and produces an error on other +// platforms. +func applyProfile(name string) error { if name == "" { return nil } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_unsupported.go index 0bc473f81..684248f25 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_unsupported.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_unsupported.go @@ -1,18 +1,13 @@ +//go:build !linux // +build !linux package apparmor -import ( - "errors" -) - -var ErrApparmorNotEnabled = errors.New("apparmor: config provided but apparmor not supported") - -func IsEnabled() bool { +func isEnabled() bool { return false } -func ApplyProfile(name string) error { +func applyProfile(name string) error { if name != "" { return ErrApparmorNotEnabled } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go index 68a346ca5..ba2b2266c 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go @@ -1,5 +1,3 @@ -// +build linux - package cgroups import ( diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups_unsupported.go deleted file mode 100644 index 278d507e2..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups_unsupported.go +++ /dev/null @@ -1,3 +0,0 @@ -// +build !linux - -package cgroups diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/file.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/file.go new file mode 100644 index 000000000..0cdaf7478 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/file.go @@ -0,0 +1,190 @@ +package cgroups + +import ( + "bytes" + "errors" + "fmt" + "os" + "path" + "strconv" + "strings" + "sync" + + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +// OpenFile opens a cgroup file in a given dir with given flags. +// It is supposed to be used for cgroup files only, and returns +// an error if the file is not a cgroup file. +// +// Arguments dir and file are joined together to form an absolute path +// to a file being opened. 
+func OpenFile(dir, file string, flags int) (*os.File, error) { + if dir == "" { + return nil, fmt.Errorf("no directory specified for %s", file) + } + return openFile(dir, file, flags) +} + +// ReadFile reads data from a cgroup file in dir. +// It is supposed to be used for cgroup files only. +func ReadFile(dir, file string) (string, error) { + fd, err := OpenFile(dir, file, unix.O_RDONLY) + if err != nil { + return "", err + } + defer fd.Close() + var buf bytes.Buffer + + _, err = buf.ReadFrom(fd) + return buf.String(), err +} + +// WriteFile writes data to a cgroup file in dir. +// It is supposed to be used for cgroup files only. +func WriteFile(dir, file, data string) error { + fd, err := OpenFile(dir, file, unix.O_WRONLY) + if err != nil { + return err + } + defer fd.Close() + if err := retryingWriteFile(fd, data); err != nil { + // Having data in the error message helps in debugging. + return fmt.Errorf("failed to write %q: %w", data, err) + } + return nil +} + +func retryingWriteFile(fd *os.File, data string) error { + for { + _, err := fd.Write([]byte(data)) + if errors.Is(err, unix.EINTR) { + logrus.Infof("interrupted while writing %s to %s", data, fd.Name()) + continue + } + return err + } +} + +const ( + cgroupfsDir = "/sys/fs/cgroup" + cgroupfsPrefix = cgroupfsDir + "/" +) + +var ( + // TestMode is set to true by unit tests that need "fake" cgroupfs. + TestMode bool + + cgroupFd int = -1 + prepOnce sync.Once + prepErr error + resolveFlags uint64 +) + +func prepareOpenat2() error { + prepOnce.Do(func() { + fd, err := unix.Openat2(-1, cgroupfsDir, &unix.OpenHow{ + Flags: unix.O_DIRECTORY | unix.O_PATH, + }) + if err != nil { + prepErr = &os.PathError{Op: "openat2", Path: cgroupfsDir, Err: err} + if err != unix.ENOSYS { //nolint:errorlint // unix errors are bare + logrus.Warnf("falling back to securejoin: %s", prepErr) + } else { + logrus.Debug("openat2 not available, falling back to securejoin") + } + return + } + var st unix.Statfs_t + if err = unix.Fstatfs(fd, &st); err != nil { + prepErr = &os.PathError{Op: "statfs", Path: cgroupfsDir, Err: err} + logrus.Warnf("falling back to securejoin: %s", prepErr) + return + } + + cgroupFd = fd + + resolveFlags = unix.RESOLVE_BENEATH | unix.RESOLVE_NO_MAGICLINKS + if st.Type == unix.CGROUP2_SUPER_MAGIC { + // cgroupv2 has a single mountpoint and no "cpu,cpuacct" symlinks + resolveFlags |= unix.RESOLVE_NO_XDEV | unix.RESOLVE_NO_SYMLINKS + } + }) + + return prepErr +} + +func openFile(dir, file string, flags int) (*os.File, error) { + mode := os.FileMode(0) + if TestMode && flags&os.O_WRONLY != 0 { + // "emulate" cgroup fs for unit tests + flags |= os.O_TRUNC | os.O_CREATE + mode = 0o600 + } + path := path.Join(dir, file) + if prepareOpenat2() != nil { + return openFallback(path, flags, mode) + } + relPath := strings.TrimPrefix(path, cgroupfsPrefix) + if len(relPath) == len(path) { // non-standard path, old system? + return openFallback(path, flags, mode) + } + + fd, err := unix.Openat2(cgroupFd, relPath, + &unix.OpenHow{ + Resolve: resolveFlags, + Flags: uint64(flags) | unix.O_CLOEXEC, + Mode: uint64(mode), + }) + if err != nil { + err = &os.PathError{Op: "openat2", Path: path, Err: err} + // Check if cgroupFd is still opened to cgroupfsDir + // (happens when this package is incorrectly used + // across the chroot/pivot_root/mntns boundary, or + // when /sys/fs/cgroup is remounted). + // + // TODO: if such usage will ever be common, amend this + // to reopen cgroupFd and retry openat2. 
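// [Editor's example, not part of the upstream patch.] Typical use of the
// relocated helpers; the cgroup path below is illustrative, and EINTR on
// writes is retried internally:
package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/cgroups"
)

func main() {
	// Which controllers are enabled on the unified hierarchy?
	data, err := cgroups.ReadFile("/sys/fs/cgroup", "cgroup.controllers")
	if err != nil {
		panic(err)
	}
	fmt.Print(data)

	// Move a (hypothetical) PID into a cgroup.
	if err := cgroups.WriteFile("/sys/fs/cgroup/mycg", "cgroup.procs", "1234"); err != nil {
		fmt.Println("write failed:", err)
	}
}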
+ fdStr := strconv.Itoa(cgroupFd) + fdDest, _ := os.Readlink("/proc/self/fd/" + fdStr) + if fdDest != cgroupfsDir { + // Wrap the error so it is clear that cgroupFd + // is opened to an unexpected/wrong directory. + err = fmt.Errorf("cgroupFd %s unexpectedly opened to %s != %s: %w", + fdStr, fdDest, cgroupfsDir, err) + } + return nil, err + } + + return os.NewFile(uintptr(fd), path), nil +} + +var errNotCgroupfs = errors.New("not a cgroup file") + +// Can be changed by unit tests. +var openFallback = openAndCheck + +// openAndCheck is used when openat2(2) is not available. It checks the opened +// file is on cgroupfs, returning an error otherwise. +func openAndCheck(path string, flags int, mode os.FileMode) (*os.File, error) { + fd, err := os.OpenFile(path, flags, mode) + if err != nil { + return nil, err + } + if TestMode { + return fd, nil + } + // Check this is a cgroupfs file. + var st unix.Statfs_t + if err := unix.Fstatfs(int(fd.Fd()), &st); err != nil { + _ = fd.Close() + return nil, &os.PathError{Op: "statfs", Path: path, Err: err} + } + if st.Type != unix.CGROUP_SUPER_MAGIC && st.Type != unix.CGROUP2_SUPER_MAGIC { + _ = fd.Close() + return nil, &os.PathError{Op: "open", Path: path, Err: errNotCgroupfs} + } + + return fd, nil +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/fscommon.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/fscommon.go deleted file mode 100644 index ae2613cdb..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/fscommon.go +++ /dev/null @@ -1,51 +0,0 @@ -// +build linux - -package fscommon - -import ( - "bytes" - "os" - - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -// WriteFile writes data to a cgroup file in dir. -// It is supposed to be used for cgroup files only. -func WriteFile(dir, file, data string) error { - fd, err := OpenFile(dir, file, unix.O_WRONLY) - if err != nil { - return err - } - defer fd.Close() - if err := retryingWriteFile(fd, data); err != nil { - return errors.Wrapf(err, "failed to write %q", data) - } - return nil -} - -// ReadFile reads data from a cgroup file in dir. -// It is supposed to be used for cgroup files only. -func ReadFile(dir, file string) (string, error) { - fd, err := OpenFile(dir, file, unix.O_RDONLY) - if err != nil { - return "", err - } - defer fd.Close() - var buf bytes.Buffer - - _, err = buf.ReadFrom(fd) - return buf.String(), err -} - -func retryingWriteFile(fd *os.File, data string) error { - for { - _, err := fd.Write([]byte(data)) - if errors.Is(err, unix.EINTR) { - logrus.Infof("interrupted while writing %s to %s", data, fd.Name()) - continue - } - return err - } -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/open.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/open.go deleted file mode 100644 index 49af83b3c..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/open.go +++ /dev/null @@ -1,120 +0,0 @@ -package fscommon - -import ( - "os" - "strings" - "sync" - - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -const ( - cgroupfsDir = "/sys/fs/cgroup" - cgroupfsPrefix = cgroupfsDir + "/" -) - -var ( - // TestMode is set to true by unit tests that need "fake" cgroupfs. 
- TestMode bool - - cgroupFd int = -1 - prepOnce sync.Once - prepErr error - resolveFlags uint64 -) - -func prepareOpenat2() error { - prepOnce.Do(func() { - fd, err := unix.Openat2(-1, cgroupfsDir, &unix.OpenHow{ - Flags: unix.O_DIRECTORY | unix.O_PATH}) - if err != nil { - prepErr = &os.PathError{Op: "openat2", Path: cgroupfsDir, Err: err} - if err != unix.ENOSYS { - logrus.Warnf("falling back to securejoin: %s", prepErr) - } else { - logrus.Debug("openat2 not available, falling back to securejoin") - } - return - } - var st unix.Statfs_t - if err = unix.Fstatfs(fd, &st); err != nil { - prepErr = &os.PathError{Op: "statfs", Path: cgroupfsDir, Err: err} - logrus.Warnf("falling back to securejoin: %s", prepErr) - return - } - - cgroupFd = fd - - resolveFlags = unix.RESOLVE_BENEATH | unix.RESOLVE_NO_MAGICLINKS - if st.Type == unix.CGROUP2_SUPER_MAGIC { - // cgroupv2 has a single mountpoint and no "cpu,cpuacct" symlinks - resolveFlags |= unix.RESOLVE_NO_XDEV | unix.RESOLVE_NO_SYMLINKS - } - - }) - - return prepErr -} - -// OpenFile opens a cgroup file in a given dir with given flags. -// It is supposed to be used for cgroup files only. -func OpenFile(dir, file string, flags int) (*os.File, error) { - if dir == "" { - return nil, errors.Errorf("no directory specified for %s", file) - } - mode := os.FileMode(0) - if TestMode && flags&os.O_WRONLY != 0 { - // "emulate" cgroup fs for unit tests - flags |= os.O_TRUNC | os.O_CREATE - mode = 0o600 - } - if prepareOpenat2() != nil { - return openFallback(dir, file, flags, mode) - } - reldir := strings.TrimPrefix(dir, cgroupfsPrefix) - if len(reldir) == len(dir) { // non-standard path, old system? - return openFallback(dir, file, flags, mode) - } - - relname := reldir + "/" + file - fd, err := unix.Openat2(cgroupFd, relname, - &unix.OpenHow{ - Resolve: resolveFlags, - Flags: uint64(flags) | unix.O_CLOEXEC, - Mode: uint64(mode), - }) - if err != nil { - return nil, &os.PathError{Op: "openat2", Path: dir + "/" + file, Err: err} - } - - return os.NewFile(uintptr(fd), cgroupfsPrefix+relname), nil -} - -var errNotCgroupfs = errors.New("not a cgroup file") - -// openFallback is used when openat2(2) is not available. It checks the opened -// file is on cgroupfs, returning an error otherwise. -func openFallback(dir, file string, flags int, mode os.FileMode) (*os.File, error) { - path := dir + "/" + file - fd, err := os.OpenFile(path, flags, mode) - if err != nil { - return nil, err - } - if TestMode { - return fd, nil - } - // Check this is a cgroupfs file. - var st unix.Statfs_t - if err := unix.Fstatfs(int(fd.Fd()), &st); err != nil { - _ = fd.Close() - return nil, &os.PathError{Op: "statfs", Path: path, Err: err} - } - if st.Type != unix.CGROUP_SUPER_MAGIC && st.Type != unix.CGROUP2_SUPER_MAGIC { - _ = fd.Close() - return nil, &os.PathError{Op: "open", Path: path, Err: errNotCgroupfs} - } - - return fd, nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/utils.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/utils.go deleted file mode 100644 index db0caded1..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/utils.go +++ /dev/null @@ -1,122 +0,0 @@ -// +build linux - -package fscommon - -import ( - "errors" - "fmt" - "math" - "strconv" - "strings" -) - -var ( - ErrNotValidFormat = errors.New("line is not a valid key value format") -) - -// ParseUint converts a string to an uint64 integer. 
-// Negative values are returned at zero as, due to kernel bugs, -// some of the memory cgroup stats can be negative. -func ParseUint(s string, base, bitSize int) (uint64, error) { - value, err := strconv.ParseUint(s, base, bitSize) - if err != nil { - intValue, intErr := strconv.ParseInt(s, base, bitSize) - // 1. Handle negative values greater than MinInt64 (and) - // 2. Handle negative values lesser than MinInt64 - if intErr == nil && intValue < 0 { - return 0, nil - } else if intErr != nil && intErr.(*strconv.NumError).Err == strconv.ErrRange && intValue < 0 { - return 0, nil - } - - return value, err - } - - return value, nil -} - -// ParseKeyValue parses a space-separated "name value" kind of cgroup -// parameter and returns its key as a string, and its value as uint64 -// (ParseUint is used to convert the value). For example, -// "io_service_bytes 1234" will be returned as "io_service_bytes", 1234. -func ParseKeyValue(t string) (string, uint64, error) { - parts := strings.SplitN(t, " ", 3) - if len(parts) != 2 { - return "", 0, fmt.Errorf("line %q is not in key value format", t) - } - - value, err := ParseUint(parts[1], 10, 64) - if err != nil { - return "", 0, fmt.Errorf("unable to convert to uint64: %v", err) - } - - return parts[0], value, nil -} - -// GetValueByKey reads a key-value pairs from the specified cgroup file, -// and returns a value of the specified key. ParseUint is used for value -// conversion. -func GetValueByKey(path, file, key string) (uint64, error) { - content, err := ReadFile(path, file) - if err != nil { - return 0, err - } - - lines := strings.Split(string(content), "\n") - for _, line := range lines { - arr := strings.Split(line, " ") - if len(arr) == 2 && arr[0] == key { - return ParseUint(arr[1], 10, 64) - } - } - - return 0, nil -} - -// GetCgroupParamUint reads a single uint64 value from the specified cgroup file. -// If the value read is "max", the math.MaxUint64 is returned. -func GetCgroupParamUint(path, file string) (uint64, error) { - contents, err := GetCgroupParamString(path, file) - if err != nil { - return 0, err - } - contents = strings.TrimSpace(contents) - if contents == "max" { - return math.MaxUint64, nil - } - - res, err := ParseUint(contents, 10, 64) - if err != nil { - return res, fmt.Errorf("unable to parse file %q", path+"/"+file) - } - return res, nil -} - -// GetCgroupParamInt reads a single int64 value from specified cgroup file. -// If the value read is "max", the math.MaxInt64 is returned. -func GetCgroupParamInt(path, file string) (int64, error) { - contents, err := ReadFile(path, file) - if err != nil { - return 0, err - } - contents = strings.TrimSpace(contents) - if contents == "max" { - return math.MaxInt64, nil - } - - res, err := strconv.ParseInt(contents, 10, 64) - if err != nil { - return res, fmt.Errorf("unable to parse %q as a int from Cgroup file %q", contents, path+"/"+file) - } - return res, nil -} - -// GetCgroupParamString reads a string from the specified cgroup file. 
-func GetCgroupParamString(path, file string) (string, error) { - contents, err := ReadFile(path, file) - if err != nil { - return "", err - } - - return strings.TrimSpace(contents), nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/getallpids.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/getallpids.go new file mode 100644 index 000000000..1355a5101 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/getallpids.go @@ -0,0 +1,27 @@ +package cgroups + +import ( + "io/fs" + "path/filepath" +) + +// GetAllPids returns all pids from the cgroup identified by path, and all its +// sub-cgroups. +func GetAllPids(path string) ([]int, error) { + var pids []int + err := filepath.WalkDir(path, func(p string, d fs.DirEntry, iErr error) error { + if iErr != nil { + return iErr + } + if !d.IsDir() { + return nil + } + cPids, err := readProcsFile(p) + if err != nil { + return err + } + pids = append(pids, cPids...) + return nil + }) + return pids, err +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go index e7f9c4626..40a81dd5a 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go @@ -1,5 +1,3 @@ -// +build linux - package cgroups type ThrottlingData struct { @@ -126,7 +124,7 @@ type BlkioStatEntry struct { } type BlkioStats struct { - // number of bytes tranferred to and from the block device + // number of bytes transferred to and from the block device IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive,omitempty"` IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive,omitempty"` IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive,omitempty"` @@ -146,6 +144,17 @@ type HugetlbStats struct { Failcnt uint64 `json:"failcnt"` } +type RdmaEntry struct { + Device string `json:"device,omitempty"` + HcaHandles uint32 `json:"hca_handles,omitempty"` + HcaObjects uint32 `json:"hca_objects,omitempty"` +} + +type RdmaStats struct { + RdmaLimit []RdmaEntry `json:"rdma_limit,omitempty"` + RdmaCurrent []RdmaEntry `json:"rdma_current,omitempty"` +} + type Stats struct { CpuStats CpuStats `json:"cpu_stats,omitempty"` CPUSetStats CPUSetStats `json:"cpuset_stats,omitempty"` @@ -154,6 +163,7 @@ type Stats struct { BlkioStats BlkioStats `json:"blkio_stats,omitempty"` // the map is in the format "size of hugepage: stats of the hugepage" HugetlbStats map[string]HugetlbStats `json:"hugetlb_stats,omitempty"` + RdmaStats RdmaStats `json:"rdma_stats,omitempty"` } func NewStats() *Stats { diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go index 35ce2c1c2..b32af4ee5 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go @@ -1,5 +1,3 @@ -// +build linux - package cgroups import ( @@ -7,7 +5,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" "path/filepath" "strconv" @@ -15,7 +12,6 @@ import ( "sync" "time" - "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" "github.com/opencontainers/runc/libcontainer/userns" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" @@ -24,11 +20,14 @@ import ( const ( CgroupProcesses = "cgroup.procs" unifiedMountpoint = "/sys/fs/cgroup" + hybridMountpoint = 
"/sys/fs/cgroup/unified" ) var ( isUnifiedOnce sync.Once isUnified bool + isHybridOnce sync.Once + isHybrid bool ) // IsCgroup2UnifiedMode returns whether we are running in cgroup v2 unified mode. @@ -50,6 +49,24 @@ func IsCgroup2UnifiedMode() bool { return isUnified } +// IsCgroup2HybridMode returns whether we are running in cgroup v2 hybrid mode. +func IsCgroup2HybridMode() bool { + isHybridOnce.Do(func() { + var st unix.Statfs_t + err := unix.Statfs(hybridMountpoint, &st) + if err != nil { + isHybrid = false + if !os.IsNotExist(err) { + // Report unexpected errors. + logrus.WithError(err).Debugf("statfs(%q) failed", hybridMountpoint) + } + return + } + isHybrid = st.Type == unix.CGROUP2_SUPER_MAGIC + }) + return isHybrid +} + type Mount struct { Mountpoint string Root string @@ -88,7 +105,7 @@ func GetAllSubsystems() ([]string, error) { // - freezer: implemented in kernel 5.2 // We assume these are always available, as it is hard to detect availability. pseudo := []string{"devices", "freezer"} - data, err := fscommon.ReadFile("/sys/fs/cgroup", "cgroup.controllers") + data, err := ReadFile("/sys/fs/cgroup", "cgroup.controllers") if err != nil { return nil, err } @@ -119,8 +136,8 @@ func GetAllSubsystems() ([]string, error) { return subsystems, nil } -func readProcsFile(file string) ([]int, error) { - f, err := os.Open(file) +func readProcsFile(dir string) ([]int, error) { + f, err := OpenFile(dir, CgroupProcesses, os.O_RDONLY) if err != nil { return nil, err } @@ -211,7 +228,7 @@ func EnterPid(cgroupPaths map[string]string, pid int) error { func rmdir(path string) error { err := unix.Rmdir(path) - if err == nil || err == unix.ENOENT { + if err == nil || err == unix.ENOENT { //nolint:errorlint // unix errors are bare return nil } return &os.PathError{Op: "rmdir", Path: path, Err: err} @@ -225,7 +242,7 @@ func RemovePath(path string) error { return nil } - infos, err := ioutil.ReadDir(path) + infos, err := os.ReadDir(path) if err != nil { if os.IsNotExist(err) { err = nil @@ -267,7 +284,6 @@ func RemovePaths(paths map[string]string) (err error) { case retries - 1: logrus.WithError(err).Error("Failed to remove cgroup") } - } _, err := os.Stat(p) // We need this strange way of checking cgroups existence because @@ -286,40 +302,61 @@ func RemovePaths(paths map[string]string) (err error) { return fmt.Errorf("Failed to remove paths: %v", paths) } -func GetHugePageSize() ([]string, error) { - dir, err := os.OpenFile("/sys/kernel/mm/hugepages", unix.O_DIRECTORY|unix.O_RDONLY, 0) - if err != nil { - return nil, err - } - files, err := dir.Readdirnames(0) - dir.Close() - if err != nil { - return nil, err - } +var ( + hugePageSizes []string + initHPSOnce sync.Once +) - return getHugePageSizeFromFilenames(files) +func HugePageSizes() []string { + initHPSOnce.Do(func() { + dir, err := os.OpenFile("/sys/kernel/mm/hugepages", unix.O_DIRECTORY|unix.O_RDONLY, 0) + if err != nil { + return + } + files, err := dir.Readdirnames(0) + dir.Close() + if err != nil { + return + } + + hugePageSizes, err = getHugePageSizeFromFilenames(files) + if err != nil { + logrus.Warn("HugePageSizes: ", err) + } + }) + + return hugePageSizes } func getHugePageSizeFromFilenames(fileNames []string) ([]string, error) { pageSizes := make([]string, 0, len(fileNames)) + var warn error for _, file := range fileNames { // example: hugepages-1048576kB val := strings.TrimPrefix(file, "hugepages-") if len(val) == len(file) { - // unexpected file name: no prefix found + // Unexpected file name: no prefix found, ignore it. 
continue } - // The suffix is always "kB" (as of Linux 5.9) + // The suffix is always "kB" (as of Linux 5.13). If we find + // something else, produce an error but keep going. eLen := len(val) - 2 val = strings.TrimSuffix(val, "kB") if len(val) != eLen { - logrus.Warnf("GetHugePageSize: %s: invalid filename suffix (expected \"kB\")", file) + // Highly unlikely. + if warn == nil { + warn = errors.New(file + `: invalid suffix (expected "kB")`) + } continue } size, err := strconv.Atoi(val) if err != nil { - return nil, err + // Highly unlikely. + if warn == nil { + warn = fmt.Errorf("%s: %w", file, err) + } + continue } // Model after https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/mm/hugetlb_cgroup.c?id=eff48ddeab782e35e58ccc8853f7386bbae9dec4#n574 // but in our case the size is in KB already. @@ -333,34 +370,12 @@ func getHugePageSizeFromFilenames(fileNames []string) ([]string, error) { pageSizes = append(pageSizes, val) } - return pageSizes, nil + return pageSizes, warn } // GetPids returns all pids, that were added to cgroup at path. func GetPids(dir string) ([]int, error) { - return readProcsFile(filepath.Join(dir, CgroupProcesses)) -} - -// GetAllPids returns all pids, that were added to cgroup at path and to all its -// subcgroups. -func GetAllPids(path string) ([]int, error) { - var pids []int - // collect pids from all sub-cgroups - err := filepath.Walk(path, func(p string, info os.FileInfo, iErr error) error { - if iErr != nil { - return iErr - } - if info.IsDir() || info.Name() != CgroupProcesses { - return nil - } - cPids, err := readProcsFile(p) - if err != nil { - return err - } - pids = append(pids, cPids...) - return nil - }) - return pids, err + return readProcsFile(dir) } // WriteCgroupProc writes the specified pid into the cgroup's cgroup.procs file @@ -376,9 +391,9 @@ func WriteCgroupProc(dir string, pid int) error { return nil } - file, err := fscommon.OpenFile(dir, CgroupProcesses, os.O_WRONLY) + file, err := OpenFile(dir, CgroupProcesses, os.O_WRONLY) if err != nil { - return fmt.Errorf("failed to write %v to %v: %v", pid, CgroupProcesses, err) + return fmt.Errorf("failed to write %v: %w", pid, err) } defer file.Close() @@ -395,7 +410,7 @@ func WriteCgroupProc(dir string, pid int) error { continue } - return fmt.Errorf("failed to write %v to %v: %v", pid, CgroupProcesses, err) + return fmt.Errorf("failed to write %v: %w", pid, err) } return err } @@ -448,5 +463,5 @@ func ConvertBlkIOToIOWeightValue(blkIoWeight uint16) uint64 { if blkIoWeight == 0 { return 0 } - return uint64(1 + (uint64(blkIoWeight)-10)*9999/990) + return 1 + (uint64(blkIoWeight)-10)*9999/990 } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/v1_utils.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/v1_utils.go index 95ec9dff0..47c75f22b 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/v1_utils.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/v1_utils.go @@ -46,11 +46,8 @@ func NewNotFoundError(sub string) error { } func IsNotFound(err error) bool { - if err == nil { - return false - } - _, ok := err.(*NotFoundError) - return ok + var nfErr *NotFoundError + return errors.As(err, &nfErr) } func tryDefaultPath(cgroupPath, subsystem string) string { @@ -116,6 +113,11 @@ func FindCgroupMountpoint(cgroupPath, subsystem string) (string, error) { return "", errUnified } + // If subsystem is empty, we look for the cgroupv2 hybrid path. 
+ if len(subsystem) == 0 { + return hybridMountpoint, nil + } + // Avoid parsing mountinfo by trying the default path first, if possible. if path := tryDefaultPath(cgroupPath, subsystem); path != "" { return path, nil @@ -154,7 +156,7 @@ func findCgroupMountpointAndRootFromMI(mounts []*mountinfo.Info, cgroupPath, sub func (m Mount) GetOwnCgroup(cgroups map[string]string) (string, error) { if len(m.Subsystems) == 0 { - return "", fmt.Errorf("no subsystem for mount") + return "", errors.New("no subsystem for mount") } return getControllerPath(m.Subsystems[0], cgroups) @@ -226,6 +228,11 @@ func GetOwnCgroupPath(subsystem string) (string, error) { return "", err } + // If subsystem is empty, we look for the cgroupv2 hybrid path. + if len(subsystem) == 0 { + return hybridMountpoint, nil + } + return getCgroupPathHelper(subsystem, cgroup) } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go index 87d0da842..2d4a89871 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go @@ -13,12 +13,12 @@ const ( Thawed FreezerState = "THAWED" ) +// Cgroup holds properties of a cgroup on Linux. type Cgroup struct { - // Deprecated, use Path instead + // Name specifies the name of the cgroup Name string `json:"name,omitempty"` - // name of parent of cgroup or slice - // Deprecated, use Path instead + // Parent specifies the name of parent of cgroup or slice Parent string `json:"parent,omitempty"` // Path specifies the path to cgroups that are created and/or joined by the container. @@ -28,17 +28,26 @@ type Cgroup struct { // ScopePrefix describes prefix for the scope name ScopePrefix string `json:"scope_prefix"` - // Paths represent the absolute cgroups paths to join. - // This takes precedence over Path. - Paths map[string]string - // Resources contains various cgroups settings to apply *Resources + // Systemd tells if systemd should be used to manage cgroups. + Systemd bool + // SystemdProps are any additional properties for systemd, // derived from org.systemd.property.xxx annotations. // Ignored unless systemd is used for managing cgroups. SystemdProps []systemdDbus.Property `json:"-"` + + // Rootless tells if rootless cgroups should be used. + Rootless bool + + // The host UID that should own the cgroup, or nil to accept + // the default ownership. This should only be set when the + // cgroupfs is to be mounted read/write. + // Not all cgroup manager implementations support changing + // the ownership. + OwnerUID *int `json:"owner_uid,omitempty"` } type Resources struct { @@ -117,6 +126,9 @@ type Resources struct { // Set class identifier for container's network packets NetClsClassid uint32 `json:"net_cls_classid_u"` + // Rdma resource restriction configuration + Rdma map[string]LinuxRdma `json:"rdma"` + // Used on cgroups v2: // CpuWeight sets a proportional bandwidth limit. @@ -127,8 +139,20 @@ type Resources struct { // SkipDevices allows to skip configuring device permissions. // Used by e.g. kubelet while creating a parent cgroup (kubepods) - // common for many containers. + // common for many containers, and by runc update. // // NOTE it is impossible to start a container which has this flag set. - SkipDevices bool `json:"skip_devices"` + SkipDevices bool `json:"-"` + + // SkipFreezeOnSet is a flag for cgroup manager to skip the cgroup + // freeze when setting resources. 
Only applicable to systemd legacy + // (i.e. cgroup v1) manager (which uses freeze by default to avoid + // spurious permission errors caused by systemd inability to update + // device rules in a non-disruptive manner). + // + // If not set, a few methods (such as looking into cgroup's + // devices.list and querying the systemd unit properties) are used + // during Set() to figure out whether the freeze is required. Those + // methods may be relatively slow, thus this flag. + SkipFreezeOnSet bool `json:"-"` } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go index c0c23d700..7e383020f 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go @@ -1,8 +1,9 @@ +//go:build !linux // +build !linux package configs +// Cgroup holds properties of a cgroup on Linux // TODO Windows: This can ultimately be entirely factored out on Windows as // cgroups are a Unix-specific construct. -type Cgroup struct { -} +type Cgroup struct{} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go index 14a096038..c1b4a0041 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go @@ -7,10 +7,10 @@ import ( "os/exec" "time" + "github.com/sirupsen/logrus" + "github.com/opencontainers/runc/libcontainer/devices" "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) type Rlimit struct { @@ -31,10 +31,12 @@ type IDMap struct { // for syscalls. Additional architectures can be added by specifying them in // Architectures. type Seccomp struct { - DefaultAction Action `json:"default_action"` - Architectures []string `json:"architectures"` - Syscalls []*Syscall `json:"syscalls"` - DefaultErrnoRet *uint `json:"default_errno_ret"` + DefaultAction Action `json:"default_action"` + Architectures []string `json:"architectures"` + Syscalls []*Syscall `json:"syscalls"` + DefaultErrnoRet *uint `json:"default_errno_ret"` + ListenerPath string `json:"listener_path,omitempty"` + ListenerMetadata string `json:"listener_metadata,omitempty"` } // Action is taken upon rule match in Seccomp @@ -47,6 +49,9 @@ const ( Allow Trace Log + Notify + KillThread + KillProcess ) // Operator is a comparison operator to be used when matching syscall arguments in Seccomp @@ -208,9 +213,11 @@ type Config struct { RootlessCgroups bool `json:"rootless_cgroups,omitempty"` } -type HookName string -type HookList []Hook -type Hooks map[HookName]HookList +type ( + HookName string + HookList []Hook + Hooks map[HookName]HookList +) const ( // Prestart commands are executed after the container namespaces are created, @@ -244,6 +251,19 @@ const ( Poststop HookName = "poststop" ) +// KnownHookNames returns the known hook names. +// Used by `runc features`. +func KnownHookNames() []string { + return []string{ + string(Prestart), // deprecated + string(CreateRuntime), + string(CreateContainer), + string(StartContainer), + string(Poststart), + string(Poststop), + } +} + type Capabilities struct { // Bounding is the set of capabilities checked by the kernel. 
Bounding []string @@ -260,7 +280,7 @@ type Capabilities struct { func (hooks HookList) RunHooks(state *specs.State) error { for i, h := range hooks { if err := h.Run(state); err != nil { - return errors.Wrapf(err, "Running hook #%d:", i) + return fmt.Errorf("error running hook #%d: %w", i, err) } } @@ -373,7 +393,7 @@ func (c Command) Run(s *specs.State) error { go func() { err := cmd.Wait() if err != nil { - err = fmt.Errorf("error running hook: %v, stdout: %s, stderr: %s", err, stdout.String(), stderr.String()) + err = fmt.Errorf("error running hook: %w, stdout: %s, stderr: %s", err, stdout.String(), stderr.String()) } errC <- err }() @@ -387,7 +407,7 @@ func (c Command) Run(s *specs.State) error { case err := <-errC: return err case <-timerCh: - cmd.Process.Kill() + _ = cmd.Process.Kill() <-errC return fmt.Errorf("hook ran past specified timeout of %.1fs", c.Timeout.Seconds()) } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/config_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/config_linux.go index 07da10804..8c02848b7 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/config_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/config_linux.go @@ -1,17 +1,24 @@ package configs -import "fmt" +import "errors" + +var ( + errNoUIDMap = errors.New("User namespaces enabled, but no uid mappings found.") + errNoUserMap = errors.New("User namespaces enabled, but no user mapping found.") + errNoGIDMap = errors.New("User namespaces enabled, but no gid mappings found.") + errNoGroupMap = errors.New("User namespaces enabled, but no group mapping found.") +) // HostUID gets the translated uid for the process on host which could be // different when user namespaces are enabled. 
func (c Config) HostUID(containerId int) (int, error) { if c.Namespaces.Contains(NEWUSER) { if c.UidMappings == nil { - return -1, fmt.Errorf("User namespaces enabled, but no uid mappings found.") + return -1, errNoUIDMap } id, found := c.hostIDFromMapping(containerId, c.UidMappings) if !found { - return -1, fmt.Errorf("User namespaces enabled, but no user mapping found.") + return -1, errNoUserMap } return id, nil } @@ -30,11 +37,11 @@ func (c Config) HostRootUID() (int, error) { func (c Config) HostGID(containerId int) (int, error) { if c.Namespaces.Contains(NEWUSER) { if c.GidMappings == nil { - return -1, fmt.Errorf("User namespaces enabled, but no gid mappings found.") + return -1, errNoGIDMap } id, found := c.hostIDFromMapping(containerId, c.GidMappings) if !found { - return -1, fmt.Errorf("User namespaces enabled, but no group mapping found.") + return -1, errNoGroupMap } return id, nil } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/configs_fuzzer.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/configs_fuzzer.go index 93bf41c8d..bce829e29 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/configs_fuzzer.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/configs_fuzzer.go @@ -1,3 +1,4 @@ +//go:build gofuzz // +build gofuzz package configs diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/devices.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/devices.go deleted file mode 100644 index b9e3664ce..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/devices.go +++ /dev/null @@ -1,17 +0,0 @@ -package configs - -import "github.com/opencontainers/runc/libcontainer/devices" - -type ( - // Deprecated: use libcontainer/devices.Device - Device = devices.Device - - // Deprecated: use libcontainer/devices.Rule - DeviceRule = devices.Rule - - // Deprecated: use libcontainer/devices.Type - DeviceType = devices.Type - - // Deprecated: use libcontainer/devices.Permissions - DevicePermissions = devices.Permissions -) diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/intelrdt.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/intelrdt.go index 57e9f037d..f8d951ab8 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/intelrdt.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/intelrdt.go @@ -1,6 +1,9 @@ package configs type IntelRdt struct { + // The identity for RDT Class of Service + ClosID string `json:"closID,omitempty"` + // The schema for L3 cache id and capacity bitmask (CBM) // Format: "L3:=;=;..." L3CacheSchema string `json:"l3_cache_schema,omitempty"` diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go index 670757ddb..784c61820 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go @@ -1,9 +1,11 @@ package configs +import "golang.org/x/sys/unix" + const ( // EXT_COPYUP is a directive to copy up the contents of a directory when // a tmpfs is mounted over it. - EXT_COPYUP = 1 << iota + EXT_COPYUP = 1 << iota //nolint:golint // ignore "don't use ALL_CAPS" warning ) type Mount struct { @@ -28,6 +30,9 @@ type Mount struct { // Relabel source if set, "z" indicates shared, "Z" indicates unshared. 
Relabel string `json:"relabel"` + // RecAttr represents mount properties to be applied recursively (AT_RECURSIVE), see mount_setattr(2). + RecAttr *unix.MountAttr `json:"rec_attr"` + // Extensions are additional flags that are specific to runc. Extensions int `json:"extensions"` @@ -37,3 +42,7 @@ type Mount struct { // Optional Command to be run after Source is mounted. PostmountCmds []Command `json:"postmount_cmds"` } + +func (m *Mount) IsBind() bool { + return m.Flags&unix.MS_BIND != 0 +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go index 2dc7adfc9..0516dba8d 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package configs diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go index 5d9a5c81f..fbb0d4907 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux && !windows // +build !linux,!windows package configs diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go index 19bf713de..946db30a5 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go @@ -1,8 +1,8 @@ +//go:build !linux // +build !linux package configs // Namespace defines configuration for each namespace. It specifies an // alternate path that is able to be joined via setns. -type Namespace struct { -} +type Namespace struct{} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/network.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/network.go index ccdb228e1..c44c3ea71 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/network.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/network.go @@ -50,7 +50,10 @@ type Network struct { HairpinMode bool `json:"hairpin_mode"` } -// Routes can be specified to create entries in the route table as the container is started +// Route defines a routing table entry. +// +// Routes can be specified to create entries in the routing table as the container +// is started. // // All of destination, source, and gateway should be either IPv4 or IPv6. // One of the three options must be present, and omitted entries will use their @@ -58,15 +61,15 @@ type Network struct { // gateway to 1.2.3.4 and the interface to eth0 will set up a standard // destination of 0.0.0.0(or *) when viewed in the route table. type Route struct { - // Sets the destination and mask, should be a CIDR. Accepts IPv4 and IPv6 + // Destination specifies the destination IP address and mask in the CIDR form. Destination string `json:"destination"` - // Sets the source and mask, should be a CIDR. Accepts IPv4 and IPv6 + // Source specifies the source IP address and mask in the CIDR form. Source string `json:"source"` - // Sets the gateway. 
Accepts IPv4 and IPv6 + // Gateway specifies the gateway IP address. Gateway string `json:"gateway"` - // The device to set this route up for, for example: eth0 + // InterfaceName specifies the device to set this route up for, for example eth0. InterfaceName string `json:"interface_name"` } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/rdma.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/rdma.go new file mode 100644 index 000000000..c69f2c802 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/rdma.go @@ -0,0 +1,9 @@ +package configs + +// LinuxRdma for Linux cgroup 'rdma' resource management (Linux 4.11) +type LinuxRdma struct { + // Maximum number of HCA handles that can be opened. Default is "no limit". + HcaHandles *uint32 `json:"hca_handles,omitempty"` + // Maximum number of HCA objects that can be created. Default is "no limit". + HcaObjects *uint32 `json:"hca_objects,omitempty"` +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/devices/device_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/devices/device_unix.go index acb816998..7d8e9fc31 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/devices/device_unix.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/devices/device_unix.go @@ -1,25 +1,23 @@ +//go:build !windows // +build !windows package devices import ( "errors" - "io/ioutil" "os" "path/filepath" "golang.org/x/sys/unix" ) -var ( - // ErrNotADevice denotes that a file is not a valid linux device. - ErrNotADevice = errors.New("not a device node") -) +// ErrNotADevice denotes that a file is not a valid linux device. +var ErrNotADevice = errors.New("not a device node") // Testing dependencies var ( - unixLstat = unix.Lstat - ioutilReadDir = ioutil.ReadDir + unixLstat = unix.Lstat + osReadDir = os.ReadDir ) func mkDev(d *Rule) (uint64, error) { @@ -29,8 +27,9 @@ func mkDev(d *Rule) (uint64, error) { return unix.Mkdev(uint32(d.Major), uint32(d.Minor)), nil } -// Given the path to a device and its cgroup_permissions(which cannot be easily queried) look up the -// information about a linux device and return that information as a Device struct. +// DeviceFromPath takes the path to a device and its cgroup_permissions (which +// cannot be easily queried) to look up the information about a linux device +// and returns that information as a Device struct. func DeviceFromPath(path, permissions string) (*Device, error) { var stat unix.Stat_t err := unixLstat(path, &stat) @@ -41,7 +40,7 @@ func DeviceFromPath(path, permissions string) (*Device, error) { var ( devType Type mode = stat.Mode - devNumber = uint64(stat.Rdev) + devNumber = uint64(stat.Rdev) //nolint:unconvert // Rdev is uint32 on e.g. MIPS. major = unix.Major(devNumber) minor = unix.Minor(devNumber) ) @@ -77,7 +76,7 @@ func HostDevices() ([]*Device, error) { // GetDevices recursively traverses a directory specified by path // and returns all devices found there. 
func GetDevices(path string) ([]*Device, error) { - files, err := ioutilReadDir(path) + files, err := osReadDir(path) if err != nil { return nil, err } @@ -104,7 +103,7 @@ func GetDevices(path string) ([]*Device, error) { } device, err := DeviceFromPath(filepath.Join(path, f.Name()), "rwm") if err != nil { - if err == ErrNotADevice { + if errors.Is(err, ErrNotADevice) { continue } if os.IsNotExist(err) { diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go b/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go deleted file mode 100644 index 4379a2070..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go +++ /dev/null @@ -1,101 +0,0 @@ -// +build linux - -package system - -import ( - "os/exec" - "unsafe" - - "golang.org/x/sys/unix" -) - -type ParentDeathSignal int - -func (p ParentDeathSignal) Restore() error { - if p == 0 { - return nil - } - current, err := GetParentDeathSignal() - if err != nil { - return err - } - if p == current { - return nil - } - return p.Set() -} - -func (p ParentDeathSignal) Set() error { - return SetParentDeathSignal(uintptr(p)) -} - -func Execv(cmd string, args []string, env []string) error { - name, err := exec.LookPath(cmd) - if err != nil { - return err - } - - return unix.Exec(name, args, env) -} - -func Prlimit(pid, resource int, limit unix.Rlimit) error { - _, _, err := unix.RawSyscall6(unix.SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(&limit)), uintptr(unsafe.Pointer(&limit)), 0, 0) - if err != 0 { - return err - } - return nil -} - -func SetParentDeathSignal(sig uintptr) error { - if err := unix.Prctl(unix.PR_SET_PDEATHSIG, sig, 0, 0, 0); err != nil { - return err - } - return nil -} - -func GetParentDeathSignal() (ParentDeathSignal, error) { - var sig int - if err := unix.Prctl(unix.PR_GET_PDEATHSIG, uintptr(unsafe.Pointer(&sig)), 0, 0, 0); err != nil { - return -1, err - } - return ParentDeathSignal(sig), nil -} - -func SetKeepCaps() error { - if err := unix.Prctl(unix.PR_SET_KEEPCAPS, 1, 0, 0, 0); err != nil { - return err - } - - return nil -} - -func ClearKeepCaps() error { - if err := unix.Prctl(unix.PR_SET_KEEPCAPS, 0, 0, 0, 0); err != nil { - return err - } - - return nil -} - -func Setctty() error { - if err := unix.IoctlSetInt(0, unix.TIOCSCTTY, 0); err != nil { - return err - } - return nil -} - -// SetSubreaper sets the value i as the subreaper setting for the calling process -func SetSubreaper(i int) error { - return unix.Prctl(unix.PR_SET_CHILD_SUBREAPER, uintptr(i), 0, 0, 0) -} - -// GetSubreaper returns the subreaper setting for the calling process -func GetSubreaper() (int, error) { - var i uintptr - - if err := unix.Prctl(unix.PR_GET_CHILD_SUBREAPER, uintptr(unsafe.Pointer(&i)), 0, 0, 0); err != nil { - return -1, err - } - - return int(i), nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go b/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go deleted file mode 100644 index b73cf70b4..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go +++ /dev/null @@ -1,103 +0,0 @@ -package system - -import ( - "fmt" - "io/ioutil" - "path/filepath" - "strconv" - "strings" -) - -// State is the status of a process. 
-type State rune - -const ( // Only values for Linux 3.14 and later are listed here - Dead State = 'X' - DiskSleep State = 'D' - Running State = 'R' - Sleeping State = 'S' - Stopped State = 'T' - TracingStop State = 't' - Zombie State = 'Z' -) - -// String forms of the state from proc(5)'s documentation for -// /proc/[pid]/status' "State" field. -func (s State) String() string { - switch s { - case Dead: - return "dead" - case DiskSleep: - return "disk sleep" - case Running: - return "running" - case Sleeping: - return "sleeping" - case Stopped: - return "stopped" - case TracingStop: - return "tracing stop" - case Zombie: - return "zombie" - default: - return fmt.Sprintf("unknown (%c)", s) - } -} - -// Stat_t represents the information from /proc/[pid]/stat, as -// described in proc(5) with names based on the /proc/[pid]/status -// fields. -type Stat_t struct { - // PID is the process ID. - PID uint - - // Name is the command run by the process. - Name string - - // State is the state of the process. - State State - - // StartTime is the number of clock ticks after system boot (since - // Linux 2.6). - StartTime uint64 -} - -// Stat returns a Stat_t instance for the specified process. -func Stat(pid int) (stat Stat_t, err error) { - bytes, err := ioutil.ReadFile(filepath.Join("/proc", strconv.Itoa(pid), "stat")) - if err != nil { - return stat, err - } - return parseStat(string(bytes)) -} - -func parseStat(data string) (stat Stat_t, err error) { - // From proc(5), field 2 could contain space and is inside `(` and `)`. - // The following is an example: - // 89653 (gunicorn: maste) S 89630 89653 89653 0 -1 4194560 29689 28896 0 3 146 32 76 19 20 0 1 0 2971844 52965376 3920 18446744073709551615 1 1 0 0 0 0 0 16781312 137447943 0 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0 - i := strings.LastIndex(data, ")") - if i <= 2 || i >= len(data)-1 { - return stat, fmt.Errorf("invalid stat data: %q", data) - } - - parts := strings.SplitN(data[:i], "(", 2) - if len(parts) != 2 { - return stat, fmt.Errorf("invalid stat data: %q", data) - } - - stat.Name = parts[1] - _, err = fmt.Sscanf(parts[0], "%d", &stat.PID) - if err != nil { - return stat, err - } - - // parts indexes should be offset by 3 from the field number given - // proc(5), because parts is zero-indexed and we've removed fields - // one (PID) and two (Name) in the paren-split. - parts = strings.Split(data[i+2:], " ") - var state int - fmt.Sscanf(parts[3-3], "%c", &state) - stat.State = State(state) - fmt.Sscanf(parts[22-3], "%d", &stat.StartTime) - return stat, nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_32.go b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_32.go deleted file mode 100644 index c5ca5d862..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_32.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build linux -// +build 386 arm - -package system - -import ( - "golang.org/x/sys/unix" -) - -// Setuid sets the uid of the calling thread to the specified uid. -func Setuid(uid int) (err error) { - _, _, e1 := unix.RawSyscall(unix.SYS_SETUID32, uintptr(uid), 0, 0) - if e1 != 0 { - err = e1 - } - return -} - -// Setgid sets the gid of the calling thread to the specified gid. 
-func Setgid(gid int) (err error) { - _, _, e1 := unix.RawSyscall(unix.SYS_SETGID32, uintptr(gid), 0, 0) - if e1 != 0 { - err = e1 - } - return -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go deleted file mode 100644 index e05e30adc..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build linux -// +build arm64 amd64 mips mipsle mips64 mips64le ppc ppc64 ppc64le riscv64 s390x - -package system - -import ( - "golang.org/x/sys/unix" -) - -// Setuid sets the uid of the calling thread to the specified uid. -func Setuid(uid int) (err error) { - _, _, e1 := unix.RawSyscall(unix.SYS_SETUID, uintptr(uid), 0, 0) - if e1 != 0 { - err = e1 - } - return -} - -// Setgid sets the gid of the calling thread to the specified gid. -func Setgid(gid int) (err error) { - _, _, e1 := unix.RawSyscall(unix.SYS_SETGID, uintptr(gid), 0, 0) - if e1 != 0 { - err = e1 - } - return -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/userns_deprecated.go b/vendor/github.com/opencontainers/runc/libcontainer/system/userns_deprecated.go deleted file mode 100644 index 2de3462a5..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/userns_deprecated.go +++ /dev/null @@ -1,5 +0,0 @@ -package system - -import "github.com/opencontainers/runc/libcontainer/userns" - -var RunningInUserNS = userns.RunningInUserNS diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go deleted file mode 100644 index a6823fc99..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go +++ /dev/null @@ -1,35 +0,0 @@ -package system - -import "golang.org/x/sys/unix" - -// Returns a []byte slice if the xattr is set and nil otherwise -// Requires path and its attribute as arguments -func Lgetxattr(path string, attr string) ([]byte, error) { - var sz int - // Start with a 128 length byte array - dest := make([]byte, 128) - sz, errno := unix.Lgetxattr(path, attr, dest) - - switch { - case errno == unix.ENODATA: - return nil, errno - case errno == unix.ENOTSUP: - return nil, errno - case errno == unix.ERANGE: - // 128 byte array might just not be good enough, - // A dummy buffer is used to get the real size - // of the xattrs on disk - sz, errno = unix.Lgetxattr(path, attr, []byte{}) - if errno != nil { - return nil, errno - } - dest = make([]byte, sz) - sz, errno = unix.Lgetxattr(path, attr, dest) - if errno != nil { - return nil, errno - } - case errno != nil: - return nil, errno - } - return dest[:sz], nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go index 967717a1b..f95c1409f 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go @@ -1,3 +1,4 @@ +//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris // +build darwin dragonfly freebsd linux netbsd openbsd solaris package user diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go index 68da4400d..2473c5ead 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go +++ 
b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go @@ -2,6 +2,7 @@ package user import ( "bufio" + "bytes" "errors" "fmt" "io" @@ -11,19 +12,17 @@ import ( ) const ( - minId = 0 - maxId = 1<<31 - 1 //for 32-bit systems compatibility + minID = 0 + maxID = 1<<31 - 1 // for 32-bit systems compatibility ) var ( - // The current operating system does not provide the required data for user lookups. - ErrUnsupported = errors.New("user lookup: operating system does not provide passwd-formatted data") - - // No matching entries found in file. + // ErrNoPasswdEntries is returned if no matching entries were found in /etc/passwd. ErrNoPasswdEntries = errors.New("no matching entries in passwd file") - ErrNoGroupEntries = errors.New("no matching entries in group file") - - ErrRange = fmt.Errorf("uids and gids must be in range %d-%d", minId, maxId) + // ErrNoGroupEntries is returned if no matching entries were found in /etc/group. + ErrNoGroupEntries = errors.New("no matching entries in group file") + // ErrRange is returned if a UID or GID is outside of the valid range. + ErrRange = fmt.Errorf("uids and gids must be in range %d-%d", minID, maxID) ) type User struct { @@ -57,11 +56,11 @@ type IDMap struct { Count int64 } -func parseLine(line string, v ...interface{}) { - parseParts(strings.Split(line, ":"), v...) +func parseLine(line []byte, v ...interface{}) { + parseParts(bytes.Split(line, []byte(":")), v...) } -func parseParts(parts []string, v ...interface{}) { +func parseParts(parts [][]byte, v ...interface{}) { if len(parts) == 0 { return } @@ -77,16 +76,16 @@ func parseParts(parts []string, v ...interface{}) { // This is legit. switch e := v[i].(type) { case *string: - *e = p + *e = string(p) case *int: // "numbers", with conversion errors ignored because of some misbehaving configuration files. - *e, _ = strconv.Atoi(p) + *e, _ = strconv.Atoi(string(p)) case *int64: - *e, _ = strconv.ParseInt(p, 10, 64) + *e, _ = strconv.ParseInt(string(p), 10, 64) case *[]string: // Comma-separated lists. - if p != "" { - *e = strings.Split(p, ",") + if len(p) != 0 { + *e = strings.Split(string(p), ",") } else { *e = []string{} } @@ -121,7 +120,7 @@ func ParsePasswdFileFilter(path string, filter func(User) bool) ([]User, error) func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) { if r == nil { - return nil, fmt.Errorf("nil source for passwd-formatted data") + return nil, errors.New("nil source for passwd-formatted data") } var ( @@ -130,8 +129,8 @@ func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) { ) for s.Scan() { - line := strings.TrimSpace(s.Text()) - if line == "" { + line := bytes.TrimSpace(s.Bytes()) + if len(line) == 0 { continue } @@ -179,17 +178,55 @@ func ParseGroupFileFilter(path string, filter func(Group) bool) ([]Group, error) func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) { if r == nil { - return nil, fmt.Errorf("nil source for group-formatted data") - } + return nil, errors.New("nil source for group-formatted data") + } + rd := bufio.NewReader(r) + out := []Group{} + + // Read the file line-by-line. + for { + var ( + isPrefix bool + wholeLine []byte + err error + ) + + // Read the next line. We do so in chunks (as much as reader's + // buffer is able to keep), check if we read enough columns + // already on each step and store final result in wholeLine. 
+ for { + var line []byte + line, isPrefix, err = rd.ReadLine() - var ( - s = bufio.NewScanner(r) - out = []Group{} - ) + if err != nil { + // We should return no error if EOF is reached + // without a match. + if err == io.EOF { //nolint:errorlint // comparison with io.EOF is legit, https://github.com/polyfloyd/go-errorlint/pull/12 + err = nil + } + return out, err + } - for s.Scan() { - text := s.Text() - if text == "" { + // Simple common case: line is short enough to fit in a + // single reader's buffer. + if !isPrefix && len(wholeLine) == 0 { + wholeLine = line + break + } + + wholeLine = append(wholeLine, line...) + + // Check if we read the whole line already. + if !isPrefix { + break + } + } + + // There's no spec for /etc/passwd or /etc/group, but we try to follow + // the same rules as the glibc parser, which allows comments and blank + // space at the beginning of a line. + wholeLine = bytes.TrimSpace(wholeLine) + if len(wholeLine) == 0 || wholeLine[0] == '#' { continue } @@ -199,17 +236,12 @@ func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) { // root:x:0:root // adm:x:4:root,adm,daemon p := Group{} - parseLine(text, &p.Name, &p.Pass, &p.Gid, &p.List) + parseLine(wholeLine, &p.Name, &p.Pass, &p.Gid, &p.List) if filter == nil || filter(p) { out = append(out, p) } } - if err := s.Err(); err != nil { - return nil, err - } - - return out, nil } type ExecUser struct { @@ -280,7 +312,7 @@ func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) ( // Allow for userArg to have either "user" syntax, or optionally "user:group" syntax var userArg, groupArg string - parseLine(userSpec, &userArg, &groupArg) + parseLine([]byte(userSpec), &userArg, &groupArg) // Convert userArg and groupArg to be numeric, so we don't have to execute // Atoi *twice* for each iteration over lines. @@ -307,7 +339,7 @@ func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) ( if userArg == "" { userArg = strconv.Itoa(user.Uid) } - return nil, fmt.Errorf("unable to find user %s: %v", userArg, err) + return nil, fmt.Errorf("unable to find user %s: %w", userArg, err) } var matchedUserName string @@ -323,12 +355,12 @@ func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) ( if uidErr != nil { // Not numeric. - return nil, fmt.Errorf("unable to find user %s: %v", userArg, ErrNoPasswdEntries) + return nil, fmt.Errorf("unable to find user %s: %w", userArg, ErrNoPasswdEntries) } user.Uid = uidArg // Must be inside valid uid range. - if user.Uid < minId || user.Uid > maxId { + if user.Uid < minID || user.Uid > maxID { return nil, ErrRange } @@ -358,7 +390,7 @@ func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) ( return g.Name == groupArg }) if err != nil && group != nil { - return nil, fmt.Errorf("unable to find groups for spec %v: %v", matchedUserName, err) + return nil, fmt.Errorf("unable to find groups for spec %v: %w", matchedUserName, err) } // Only start modifying user.Gid if it is in explicit form. @@ -372,12 +404,12 @@ func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) ( if gidErr != nil { // Not numeric. - return nil, fmt.Errorf("unable to find group %s: %v", groupArg, ErrNoGroupEntries) + return nil, fmt.Errorf("unable to find group %s: %w", groupArg, ErrNoGroupEntries) } user.Gid = gidArg // Must be inside valid gid range. 
- if user.Gid < minId || user.Gid > maxId { + if user.Gid < minID || user.Gid > maxID { return nil, ErrRange } @@ -401,7 +433,7 @@ func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) ( // or the given group data is nil, the id will be returned as-is // provided it is in the legal range. func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, error) { - var groups = []Group{} + groups := []Group{} if group != nil { var err error groups, err = ParseGroupFilter(group, func(g Group) bool { @@ -413,7 +445,7 @@ func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, err return false }) if err != nil { - return nil, fmt.Errorf("Unable to find additional groups %v: %v", additionalGroups, err) + return nil, fmt.Errorf("Unable to find additional groups %v: %w", additionalGroups, err) } } @@ -436,10 +468,11 @@ func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, err if !found { gid, err := strconv.ParseInt(ag, 10, 64) if err != nil { - return nil, fmt.Errorf("Unable to find group %s", ag) + // Not a numeric ID either. + return nil, fmt.Errorf("Unable to find group %s: %w", ag, ErrNoGroupEntries) } // Ensure gid is inside gid range. - if gid < minId || gid > maxId { + if gid < minID || gid > maxID { return nil, ErrRange } gidMap[int(gid)] = struct{}{} @@ -489,7 +522,7 @@ func ParseSubIDFileFilter(path string, filter func(SubID) bool) ([]SubID, error) func ParseSubIDFilter(r io.Reader, filter func(SubID) bool) ([]SubID, error) { if r == nil { - return nil, fmt.Errorf("nil source for subid-formatted data") + return nil, errors.New("nil source for subid-formatted data") } var ( @@ -498,8 +531,8 @@ func ParseSubIDFilter(r io.Reader, filter func(SubID) bool) ([]SubID, error) { ) for s.Scan() { - line := strings.TrimSpace(s.Text()) - if line == "" { + line := bytes.TrimSpace(s.Bytes()) + if len(line) == 0 { continue } @@ -542,7 +575,7 @@ func ParseIDMapFileFilter(path string, filter func(IDMap) bool) ([]IDMap, error) func ParseIDMapFilter(r io.Reader, filter func(IDMap) bool) ([]IDMap, error) { if r == nil { - return nil, fmt.Errorf("nil source for idmap-formatted data") + return nil, errors.New("nil source for idmap-formatted data") } var ( @@ -551,14 +584,14 @@ func ParseIDMapFilter(r io.Reader, filter func(IDMap) bool) ([]IDMap, error) { ) for s.Scan() { - line := strings.TrimSpace(s.Text()) - if line == "" { + line := bytes.TrimSpace(s.Bytes()) + if len(line) == 0 { continue } // see: man 7 user_namespaces p := IDMap{} - parseParts(strings.Fields(line), &p.ID, &p.ParentID, &p.Count) + parseParts(bytes.Fields(line), &p.ID, &p.ParentID, &p.Count) if filter == nil || filter(p) { out = append(out, p) diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/user_fuzzer.go b/vendor/github.com/opencontainers/runc/libcontainer/user/user_fuzzer.go index 8c9bb5df3..e018eae61 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/user_fuzzer.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/user/user_fuzzer.go @@ -1,3 +1,4 @@ +//go:build gofuzz // +build gofuzz package user diff --git a/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_fuzzer.go b/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_fuzzer.go index 529f8eaea..1e00ab8b5 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_fuzzer.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_fuzzer.go @@ -1,3 +1,4 @@ +//go:build gofuzz // +build gofuzz package 
userns diff --git a/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_unsupported.go index f45bb0c31..f35c13a10 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_unsupported.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package userns diff --git a/vendor/github.com/opencontainers/runc/libcontainer/utils/cmsg.go b/vendor/github.com/opencontainers/runc/libcontainer/utils/cmsg.go index c8a9364d5..7ef9da21f 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/utils/cmsg.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/utils/cmsg.go @@ -1,5 +1,3 @@ -// +build linux - package utils /* @@ -88,6 +86,11 @@ func SendFd(socket *os.File, name string, fd uintptr) error { if len(name) >= MaxNameLen { return fmt.Errorf("sendfd: filename too long: %s", name) } - oob := unix.UnixRights(int(fd)) - return unix.Sendmsg(int(socket.Fd()), []byte(name), oob, nil, 0) + return SendFds(socket, []byte(name), int(fd)) +} + +// SendFds sends a list of file descriptors and msg over the given AF_UNIX socket. +func SendFds(socket *os.File, msg []byte, fds ...int) error { + oob := unix.UnixRights(fds...) + return unix.Sendmsg(int(socket.Fd()), msg, oob, nil, 0) } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go b/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go index cd78f23e1..6b9fc3435 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go @@ -11,7 +11,7 @@ import ( "strings" "unsafe" - "github.com/cyphar/filepath-securejoin" + securejoin "github.com/cyphar/filepath-securejoin" "golang.org/x/sys/unix" ) @@ -33,16 +33,6 @@ func init() { } } -// ResolveRootfs ensures that the current working directory is -// not a symlink and returns the absolute path to the rootfs -func ResolveRootfs(uncleanRootfs string) (string, error) { - rootfs, err := filepath.Abs(uncleanRootfs) - if err != nil { - return "", err - } - return filepath.EvalSymlinks(rootfs) -} - // ExitStatus returns the correct exit status for a process based on if it // was signaled or exited cleanly func ExitStatus(status unix.WaitStatus) int { @@ -120,7 +110,7 @@ func WithProcfd(root, unsafePath string, fn func(procfd string) error) error { unsafePath = stripRoot(root, unsafePath) path, err := securejoin.SecureJoin(root, unsafePath) if err != nil { - return fmt.Errorf("resolving path inside rootfs failed: %w", err) + return fmt.Errorf("resolving path inside rootfs failed: %w", err) } // Open the target path. 
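The new SendFds helper above generalizes SendFd: it passes any number of open file descriptors as SCM_RIGHTS ancillary data alongside a payload. For reference, a minimal sketch of a matching receive side, assuming a connected AF_UNIX *os.File; the recvFds name and the buffer sizes are illustrative and not part of this patch:

package main

import (
	"os"

	"golang.org/x/sys/unix"
)

// recvFds is a hypothetical counterpart to SendFds: it reads one message
// from an AF_UNIX socket and returns the payload together with any file
// descriptors received as SCM_RIGHTS ancillary data.
func recvFds(socket *os.File, maxFds int) ([]byte, []int, error) {
	buf := make([]byte, 4096)                     // payload buffer; size is illustrative
	oob := make([]byte, unix.CmsgSpace(maxFds*4)) // room for up to maxFds 4-byte descriptors
	n, oobn, _, _, err := unix.Recvmsg(int(socket.Fd()), buf, oob, 0)
	if err != nil {
		return nil, nil, err
	}
	msgs, err := unix.ParseSocketControlMessage(oob[:oobn])
	if err != nil {
		return nil, nil, err
	}
	var fds []int
	for i := range msgs {
		got, err := unix.ParseUnixRights(&msgs[i])
		if err != nil {
			continue // not an SCM_RIGHTS message; ignore it
		}
		fds = append(fds, got...)
	}
	return buf[:n], fds, nil
}

The out-of-band buffer must be sized up front (via unix.CmsgSpace); if it is too small the kernel sets MSG_CTRUNC and closes the descriptors that did not fit, so the receiver should size it for the maximum it expects.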
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go index 1576f2d4a..220d0b439 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package utils @@ -14,7 +15,7 @@ import ( func EnsureProcHandle(fh *os.File) error { var buf unix.Statfs_t if err := unix.Fstatfs(int(fh.Fd()), &buf); err != nil { - return fmt.Errorf("ensure %s is on procfs: %v", fh.Name(), err) + return fmt.Errorf("ensure %s is on procfs: %w", fh.Name(), err) } if buf.Type != unix.PROC_SUPER_MAGIC { return fmt.Errorf("%s is not on procfs", fh.Name()) @@ -52,7 +53,7 @@ func CloseExecFrom(minFd int) error { // Intentionally ignore errors from unix.CloseOnExec -- the cases where // this might fail are basically file descriptors that have already // been closed (including and especially the one that was created when - // ioutil.ReadDir did the "opendir" syscall). + // os.ReadDir did the "opendir" syscall). unix.CloseOnExec(fd) } return nil diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/doc.go b/vendor/github.com/opencontainers/selinux/go-selinux/doc.go index 0ac7d819e..57a15c9a1 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/doc.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/doc.go @@ -9,6 +9,5 @@ Usage: if selinux.EnforceMode() != selinux.Enforcing { selinux.SetEnforceMode(selinux.Enforcing) } - */ package selinux diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go index b3d142d8c..f61a56015 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go @@ -1,12 +1,11 @@ package label import ( - "os" - "os/user" + "errors" + "fmt" "strings" "github.com/opencontainers/selinux/go-selinux" - "github.com/pkg/errors" ) // Valid Label Options @@ -53,11 +52,11 @@ func InitLabels(options []string) (plabel string, mlabel string, retErr error) { return "", selinux.PrivContainerMountLabel(), nil } if i := strings.Index(opt, ":"); i == -1 { - return "", "", errors.Errorf("Bad label option %q, valid options 'disable' or \n'user, role, level, type, filetype' followed by ':' and a value", opt) + return "", "", fmt.Errorf("Bad label option %q, valid options 'disable' or \n'user, role, level, type, filetype' followed by ':' and a value", opt) } con := strings.SplitN(opt, ":", 2) if !validOptions[con[0]] { - return "", "", errors.Errorf("Bad label option %q, valid options 'disable, user, role, level, type, filetype'", con[0]) + return "", "", fmt.Errorf("Bad label option %q, valid options 'disable, user, role, level, type, filetype'", con[0]) } if con[0] == "filetype" { mcon["type"] = con[1] @@ -102,58 +101,16 @@ func SetFileCreateLabel(fileLabel string) error { return selinux.SetFSCreateLabel(fileLabel) } -// Relabel changes the label of path to the filelabel string. +// Relabel changes the label of path and all the entries beneath the path. // It changes the MCS label to s0 if shared is true. // This will allow all containers to share the content. +// +// The path itself is guaranteed to be relabeled last. 
func Relabel(path string, fileLabel string, shared bool) error { if !selinux.GetEnabled() || fileLabel == "" { return nil } - exclude_paths := map[string]bool{ - "/": true, - "/bin": true, - "/boot": true, - "/dev": true, - "/etc": true, - "/etc/passwd": true, - "/etc/pki": true, - "/etc/shadow": true, - "/home": true, - "/lib": true, - "/lib64": true, - "/media": true, - "/opt": true, - "/proc": true, - "/root": true, - "/run": true, - "/sbin": true, - "/srv": true, - "/sys": true, - "/tmp": true, - "/usr": true, - "/var": true, - "/var/lib": true, - "/var/log": true, - } - - if home := os.Getenv("HOME"); home != "" { - exclude_paths[home] = true - } - - if sudoUser := os.Getenv("SUDO_USER"); sudoUser != "" { - if usr, err := user.Lookup(sudoUser); err == nil { - exclude_paths[usr.HomeDir] = true - } - } - - if path != "/" { - path = strings.TrimSuffix(path, "/") - } - if exclude_paths[path] { - return errors.Errorf("SELinux relabeling of %s is not allowed", path) - } - if shared { c, err := selinux.NewContext(fileLabel) if err != nil { diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go index 02d206239..f21c80c5a 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package label diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/rchcon.go b/vendor/github.com/opencontainers/selinux/go-selinux/rchcon.go new file mode 100644 index 000000000..8bff29355 --- /dev/null +++ b/vendor/github.com/opencontainers/selinux/go-selinux/rchcon.go @@ -0,0 +1,34 @@ +//go:build linux && go1.16 +// +build linux,go1.16 + +package selinux + +import ( + "errors" + "io/fs" + "os" + + "github.com/opencontainers/selinux/pkg/pwalkdir" +) + +func rchcon(fpath, label string) error { + fastMode := false + // If the current label matches the new label, assume + // other labels are correct. + if cLabel, err := lFileLabel(fpath); err == nil && cLabel == label { + fastMode = true + } + return pwalkdir.Walk(fpath, func(p string, _ fs.DirEntry, _ error) error { + if fastMode { + if cLabel, err := lFileLabel(p); err == nil && cLabel == label { + return nil + } + } + e := lSetFileLabel(p, label) + // Walking a file tree can race with removal, so ignore ENOENT. + if errors.Is(e, os.ErrNotExist) { + return nil + } + return e + }) +} diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/rchcon_go115.go b/vendor/github.com/opencontainers/selinux/go-selinux/rchcon_go115.go new file mode 100644 index 000000000..303cb1890 --- /dev/null +++ b/vendor/github.com/opencontainers/selinux/go-selinux/rchcon_go115.go @@ -0,0 +1,22 @@ +//go:build linux && !go1.16 +// +build linux,!go1.16 + +package selinux + +import ( + "errors" + "os" + + "github.com/opencontainers/selinux/pkg/pwalk" +) + +func rchcon(fpath, label string) error { + return pwalk.Walk(fpath, func(p string, _ os.FileInfo, _ error) error { + e := lSetFileLabel(p, label) + // Walking a file tree can race with removal, so ignore ENOENT. 
+ if errors.Is(e, os.ErrNotExist) { + return nil + } + return e + }) +} diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go index b336ebad3..5a59d151f 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go @@ -1,7 +1,7 @@ package selinux import ( - "github.com/pkg/errors" + "errors" ) const ( @@ -38,6 +38,8 @@ var ( // CategoryRange allows the upper bound on the category range to be adjusted CategoryRange = DefaultCategoryRange + + privContainerMountLabel string ) // Context is a representation of the SELinux label broken into 4 parts @@ -59,16 +61,30 @@ func ClassIndex(class string) (int, error) { return classIndex(class) } -// SetFileLabel sets the SELinux label for this path or returns an error. +// SetFileLabel sets the SELinux label for this path, following symlinks, +// or returns an error. func SetFileLabel(fpath string, label string) error { return setFileLabel(fpath, label) } -// FileLabel returns the SELinux label for this path or returns an error. +// LsetFileLabel sets the SELinux label for this path, not following symlinks, +// or returns an error. +func LsetFileLabel(fpath string, label string) error { + return lSetFileLabel(fpath, label) +} + +// FileLabel returns the SELinux label for this path, following symlinks, +// or returns an error. func FileLabel(fpath string) (string, error) { return fileLabel(fpath) } +// LfileLabel returns the SELinux label for this path, not following symlinks, +// or returns an error. +func LfileLabel(fpath string) (string, error) { + return lFileLabel(fpath) +} + // SetFSCreateLabel tells the kernel what label to use for all file system objects // created by this task. // Set the label to an empty string to return to the default label. Calls to SetFSCreateLabel @@ -253,6 +269,8 @@ func CopyLevel(src, dest string) (string, error) { // Chcon changes the fpath file object to the SELinux label label. // If fpath is a directory and recurse is true, then Chcon walks the // directory tree setting the label. +// +// The fpath itself is guaranteed to be relabeled last. func Chcon(fpath string, label string, recurse bool) error { return chcon(fpath, label, recurse) } @@ -280,5 +298,7 @@ func GetDefaultContextWithLevel(user, level, scon string) (string, error) { // PrivContainerMountLabel returns mount label for privileged containers func PrivContainerMountLabel() string { + // Make sure label is initialized. 
+ _ = label("") return privContainerMountLabel } diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go index 54597398b..4582cc9e0 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go @@ -5,20 +5,19 @@ import ( "bytes" "crypto/rand" "encoding/binary" + "errors" "fmt" "io" "io/ioutil" + "math/big" "os" + "os/user" "path" "path/filepath" - "regexp" "strconv" "strings" "sync" - "github.com/opencontainers/selinux/pkg/pwalk" - "github.com/pkg/errors" - "github.com/willf/bitset" "golang.org/x/sys/unix" ) @@ -35,8 +34,6 @@ const ( xattrNameSelinux = "security.selinux" ) -var policyRoot = filepath.Join(selinuxDir, readConfig(selinuxTypeTag)) - type selinuxState struct { enabledSet bool enabled bool @@ -48,7 +45,7 @@ type selinuxState struct { type level struct { sens uint - cats *bitset.BitSet + cats *big.Int } type mlsRange struct { @@ -71,7 +68,6 @@ const ( ) var ( - assignRegex = regexp.MustCompile(`^([^=]+)=(.*)$`) readOnlyFileLabel string state = selinuxState{ mcsList: make(map[string]bool), @@ -80,8 +76,24 @@ var ( // for attrPath() attrPathOnce sync.Once haveThreadSelf bool + + // for policyRoot() + policyRootOnce sync.Once + policyRootVal string + + // for label() + loadLabelsOnce sync.Once + labels map[string]string ) +func policyRoot() string { + policyRootOnce.Do(func() { + policyRootVal = filepath.Join(selinuxDir, readConfig(selinuxTypeTag)) + }) + + return policyRootVal +} + func (s *selinuxState) setEnable(enabled bool) bool { s.Lock() defer s.Unlock() @@ -120,7 +132,7 @@ func verifySELinuxfsMount(mnt string) bool { if err == nil { break } - if err == unix.EAGAIN || err == unix.EINTR { + if err == unix.EAGAIN || err == unix.EINTR { //nolint:errorlint // unix errors are bare continue } return false @@ -223,7 +235,7 @@ func readConfig(target string) string { scanner := bufio.NewScanner(in) for scanner.Scan() { - line := strings.TrimSpace(scanner.Text()) + line := bytes.TrimSpace(scanner.Bytes()) if len(line) == 0 { // Skip blank lines continue @@ -232,11 +244,12 @@ func readConfig(target string) string { // Skip comments continue } - if groups := assignRegex.FindStringSubmatch(line); groups != nil { - key, val := strings.TrimSpace(groups[1]), strings.TrimSpace(groups[2]) - if key == target { - return strings.Trim(val, "\"") - } + fields := bytes.SplitN(line, []byte{'='}, 2) + if len(fields) != 2 { + continue + } + if bytes.Equal(fields[0], []byte(target)) { + return string(bytes.Trim(fields[1], `"`)) } } return "" @@ -250,12 +263,12 @@ func isProcHandle(fh *os.File) error { if err == nil { break } - if err != unix.EINTR { - return errors.Wrapf(err, "statfs(%q) failed", fh.Name()) + if err != unix.EINTR { //nolint:errorlint // unix errors are bare + return &os.PathError{Op: "fstatfs", Path: fh.Name(), Err: err} } } if buf.Type != unix.PROC_SUPER_MAGIC { - return errors.Errorf("file %q is not on procfs", fh.Name()) + return fmt.Errorf("file %q is not on procfs", fh.Name()) } return nil @@ -275,12 +288,15 @@ func readCon(fpath string) (string, error) { if err := isProcHandle(in); err != nil { return "", err } + return readConFd(in) +} - var retval string - if _, err := fmt.Fscanf(in, "%s", &retval); err != nil { +func readConFd(in *os.File) (string, error) { + data, err := ioutil.ReadAll(in) + if err != nil { return "", err } - return strings.Trim(retval, "\x00"), nil + return 
string(bytes.TrimSuffix(data, []byte{0})), nil } // classIndex returns the int index for an object class in the loaded policy, @@ -301,8 +317,9 @@ func classIndex(class string) (int, error) { return index, nil } -// setFileLabel sets the SELinux label for this path or returns an error. -func setFileLabel(fpath string, label string) error { +// lSetFileLabel sets the SELinux label for this path, not following symlinks, +// or returns an error. +func lSetFileLabel(fpath string, label string) error { if fpath == "" { return ErrEmptyPath } @@ -311,23 +328,61 @@ func setFileLabel(fpath string, label string) error { if err == nil { break } - if err != unix.EINTR { - return errors.Wrapf(err, "failed to set file label on %s", fpath) + if err != unix.EINTR { //nolint:errorlint // unix errors are bare + return &os.PathError{Op: "lsetxattr", Path: fpath, Err: err} + } + } + + return nil +} + +// setFileLabel sets the SELinux label for this path, following symlinks, +// or returns an error. +func setFileLabel(fpath string, label string) error { + if fpath == "" { + return ErrEmptyPath + } + for { + err := unix.Setxattr(fpath, xattrNameSelinux, []byte(label), 0) + if err == nil { + break + } + if err != unix.EINTR { //nolint:errorlint // unix errors are bare + return &os.PathError{Op: "setxattr", Path: fpath, Err: err} } } return nil } -// fileLabel returns the SELinux label for this path or returns an error. +// fileLabel returns the SELinux label for this path, following symlinks, +// or returns an error. func fileLabel(fpath string) (string, error) { if fpath == "" { return "", ErrEmptyPath } + label, err := getxattr(fpath, xattrNameSelinux) + if err != nil { + return "", &os.PathError{Op: "getxattr", Path: fpath, Err: err} + } + // Trim the NUL byte at the end of the byte buffer, if present. + if len(label) > 0 && label[len(label)-1] == '\x00' { + label = label[:len(label)-1] + } + return string(label), nil +} + +// lFileLabel returns the SELinux label for this path, not following symlinks, +// or returns an error. +func lFileLabel(fpath string) (string, error) { + if fpath == "" { + return "", ErrEmptyPath + } + label, err := lgetxattr(fpath, xattrNameSelinux) if err != nil { - return "", err + return "", &os.PathError{Op: "lgetxattr", Path: fpath, Err: err} } // Trim the NUL byte at the end of the byte buffer, if present. if len(label) > 0 && label[len(label)-1] == '\x00' { @@ -390,7 +445,7 @@ func writeCon(fpath, val string) error { _, err = out.Write(nil) } if err != nil { - return errors.Wrapf(err, "failed to set %s on procfs", fpath) + return err } return nil } @@ -440,8 +495,8 @@ func computeCreateContext(source string, target string, class string) (string, e } // catsToBitset stores categories in a bitset. 
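For reference, the retry-on-EINTR shape used by setFileLabel/lSetFileLabel above, extracted into a standalone sketch; the path and attribute are made up, and a user.* attribute is used so no SELinux setup is needed:

    package main

    import (
        "fmt"
        "os"

        "golang.org/x/sys/unix"
    )

    // lsetxattrRetry mirrors lSetFileLabel: retry on transient EINTR,
    // wrap anything else in *os.PathError for context.
    func lsetxattrRetry(path, attr string, val []byte) error {
        for {
            err := unix.Lsetxattr(path, attr, val, 0)
            if err == nil {
                return nil
            }
            if err != unix.EINTR {
                return &os.PathError{Op: "lsetxattr", Path: path, Err: err}
            }
        }
    }

    func main() {
        fmt.Println(lsetxattrRetry("/tmp/demo", "user.example", []byte("hello")))
    }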
-func catsToBitset(cats string) (*bitset.BitSet, error) { - bitset := &bitset.BitSet{} +func catsToBitset(cats string) (*big.Int, error) { + bitset := new(big.Int) catlist := strings.Split(cats, ",") for _, r := range catlist { @@ -456,14 +511,14 @@ func catsToBitset(cats string) (*bitset.BitSet, error) { return nil, err } for i := catstart; i <= catend; i++ { - bitset.Set(i) + bitset.SetBit(bitset, int(i), 1) } } else { cat, err := parseLevelItem(ranges[0], category) if err != nil { return nil, err } - bitset.Set(cat) + bitset.SetBit(bitset, int(cat), 1) } } @@ -489,13 +544,13 @@ func (l *level) parseLevel(levelStr string) error { lvl := strings.SplitN(levelStr, ":", 2) sens, err := parseLevelItem(lvl[0], sensitivity) if err != nil { - return errors.Wrap(err, "failed to parse sensitivity") + return fmt.Errorf("failed to parse sensitivity: %w", err) } l.sens = sens if len(lvl) > 1 { cats, err := catsToBitset(lvl[1]) if err != nil { - return errors.Wrap(err, "failed to parse categories") + return fmt.Errorf("failed to parse categories: %w", err) } l.cats = cats } @@ -513,14 +568,14 @@ func rangeStrToMLSRange(rangeStr string) (*mlsRange, error) { case 2: mlsRange.high = &level{} if err := mlsRange.high.parseLevel(levelSlice[1]); err != nil { - return nil, errors.Wrapf(err, "failed to parse high level %q", levelSlice[1]) + return nil, fmt.Errorf("failed to parse high level %q: %w", levelSlice[1], err) } fallthrough // rangeStr that is single level, e.g. s6:c0,c3,c5,c30.c1023 case 1: mlsRange.low = &level{} if err := mlsRange.low.parseLevel(levelSlice[0]); err != nil { - return nil, errors.Wrapf(err, "failed to parse low level %q", levelSlice[0]) + return nil, fmt.Errorf("failed to parse low level %q: %w", levelSlice[0], err) } } @@ -533,37 +588,30 @@ func rangeStrToMLSRange(rangeStr string) (*mlsRange, error) { // bitsetToStr takes a category bitset and returns it in the // canonical selinux syntax -func bitsetToStr(c *bitset.BitSet) string { +func bitsetToStr(c *big.Int) string { var str string - i, e := c.NextSet(0) - len := 0 - for e { - if len == 0 { + + length := 0 + for i := int(c.TrailingZeroBits()); i < c.BitLen(); i++ { + if c.Bit(i) == 0 { + continue + } + if length == 0 { if str != "" { str += "," } - str += "c" + strconv.Itoa(int(i)) + str += "c" + strconv.Itoa(i) } - - next, e := c.NextSet(i + 1) - if e { - // consecutive cats - if next == i+1 { - len++ - i = next - continue - } + if c.Bit(i+1) == 1 { + length++ + continue } - if len == 1 { - str += ",c" + strconv.Itoa(int(i)) - } else if len > 1 { - str += ".c" + strconv.Itoa(int(i)) + if length == 1 { + str += ",c" + strconv.Itoa(i) + } else if length > 1 { + str += ".c" + strconv.Itoa(i) } - if !e { - break - } - len = 0 - i = next + length = 0 } return str @@ -576,13 +624,16 @@ func (l1 *level) equal(l2 *level) bool { if l1.sens != l2.sens { return false } - return l1.cats.Equal(l2.cats) + if l2.cats == nil || l1.cats == nil { + return l2.cats == l1.cats + } + return l1.cats.Cmp(l2.cats) == 0 } // String returns an mlsRange as a string. 
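The switch from github.com/willf/bitset to math/big above relies on big.Int behaving as an arbitrary-size bitset. A self-contained sketch of the operations catsToBitset and bitsetToStr build on:

    package main

    import (
        "fmt"
        "math/big"
    )

    func main() {
        // Categories c0, c3, c4, c5 as set bits of a big.Int.
        cats := new(big.Int)
        for _, c := range []int{0, 3, 4, 5} {
            cats.SetBit(cats, c, 1)
        }
        fmt.Println(cats.Bit(3), cats.Bit(1)) // 1 0
        fmt.Println(cats.TrailingZeroBits())  // 0: index of the lowest set bit
        fmt.Println(cats.BitLen())            // 6: the highest set bit is 5
    }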
func (m mlsRange) String() string { low := "s" + strconv.Itoa(int(m.low.sens)) - if m.low.cats != nil && m.low.cats.Count() > 0 { + if m.low.cats != nil && m.low.cats.BitLen() > 0 { low += ":" + bitsetToStr(m.low.cats) } @@ -591,7 +642,7 @@ func (m mlsRange) String() string { } high := "s" + strconv.Itoa(int(m.high.sens)) - if m.high.cats != nil && m.high.cats.Count() > 0 { + if m.high.cats != nil && m.high.cats.BitLen() > 0 { high += ":" + bitsetToStr(m.high.cats) } @@ -641,10 +692,12 @@ func calculateGlbLub(sourceRange, targetRange string) (string, error) { /* find the intersecting categories */ if s.low.cats != nil && t.low.cats != nil { - outrange.low.cats = s.low.cats.Intersection(t.low.cats) + outrange.low.cats = new(big.Int) + outrange.low.cats.And(s.low.cats, t.low.cats) } if s.high.cats != nil && t.high.cats != nil { - outrange.high.cats = s.high.cats.Intersection(t.high.cats) + outrange.high.cats = new(big.Int) + outrange.high.cats.And(s.high.cats, t.high.cats) } return outrange.String(), nil @@ -665,11 +718,7 @@ func readWriteCon(fpath string, val string) (string, error) { return "", err } - var retval string - if _, err := fmt.Fscanf(f, "%s", &retval); err != nil { - return "", err - } - return strings.Trim(retval, "\x00"), nil + return readConFd(f) } // setExecLabel sets the SELinux label that the kernel will use for any programs @@ -697,17 +746,21 @@ func socketLabel() (string, error) { // peerLabel retrieves the label of the client on the other side of a socket func peerLabel(fd uintptr) (string, error) { - return unix.GetsockoptString(int(fd), unix.SOL_SOCKET, unix.SO_PEERSEC) + label, err := unix.GetsockoptString(int(fd), unix.SOL_SOCKET, unix.SO_PEERSEC) + if err != nil { + return "", &os.PathError{Op: "getsockopt", Path: "fd " + strconv.Itoa(int(fd)), Err: err} + } + return label, nil } // setKeyLabel takes a process label and tells the kernel to assign the // label to the next kernel keyring that gets created func setKeyLabel(label string) error { err := writeCon("/proc/self/attr/keycreate", label) - if os.IsNotExist(errors.Cause(err)) { + if errors.Is(err, os.ErrNotExist) { return nil } - if label == "" && os.IsPermission(errors.Cause(err)) { + if label == "" && errors.Is(err, os.ErrPermission) { return nil } return err @@ -720,10 +773,10 @@ func keyLabel() (string, error) { // get returns the Context as a string func (c Context) get() string { - if c["level"] != "" { - return fmt.Sprintf("%s:%s:%s:%s", c["user"], c["role"], c["type"], c["level"]) + if level := c["level"]; level != "" { + return c["user"] + ":" + c["role"] + ":" + c["type"] + ":" + level } - return fmt.Sprintf("%s:%s:%s", c["user"], c["role"], c["type"]) + return c["user"] + ":" + c["role"] + ":" + c["type"] } // newContext creates a new Context struct from the specified label @@ -784,7 +837,7 @@ func enforceMode() int { // setEnforceMode sets the current SELinux mode Enforcing, Permissive. // Disabled is not valid, since this needs to be set at boot time. 
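calculateGlbLub above now intersects category sets with big.Int's And. A toy check of that step, with bit positions standing in for categories:

    package main

    import (
        "fmt"
        "math/big"
    )

    func main() {
        s := big.NewInt(0b01110) // {c1,c2,c3}
        t := big.NewInt(0b11100) // {c2,c3,c4}
        out := new(big.Int).And(s, t)
        fmt.Printf("%b\n", out) // 1100, i.e. {c2,c3}
    }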
func setEnforceMode(mode int) error { - return ioutil.WriteFile(selinuxEnforcePath(), []byte(strconv.Itoa(mode)), 0644) + return ioutil.WriteFile(selinuxEnforcePath(), []byte(strconv.Itoa(mode)), 0o644) } // defaultEnforceMode returns the systems default SELinux mode Enforcing, @@ -888,24 +941,21 @@ func openContextFile() (*os.File, error) { if f, err := os.Open(contextFile); err == nil { return f, nil } - lxcPath := filepath.Join(policyRoot, "/contexts/lxc_contexts") - return os.Open(lxcPath) + return os.Open(filepath.Join(policyRoot(), "/contexts/lxc_contexts")) } -var labels, privContainerMountLabel = loadLabels() - -func loadLabels() (map[string]string, string) { - labels := make(map[string]string) +func loadLabels() { + labels = make(map[string]string) in, err := openContextFile() if err != nil { - return labels, "" + return } defer in.Close() scanner := bufio.NewScanner(in) for scanner.Scan() { - line := strings.TrimSpace(scanner.Text()) + line := bytes.TrimSpace(scanner.Bytes()) if len(line) == 0 { // Skip blank lines continue @@ -914,38 +964,47 @@ func loadLabels() (map[string]string, string) { // Skip comments continue } - if groups := assignRegex.FindStringSubmatch(line); groups != nil { - key, val := strings.TrimSpace(groups[1]), strings.TrimSpace(groups[2]) - labels[key] = strings.Trim(val, "\"") + fields := bytes.SplitN(line, []byte{'='}, 2) + if len(fields) != 2 { + continue } + key, val := bytes.TrimSpace(fields[0]), bytes.TrimSpace(fields[1]) + labels[string(key)] = string(bytes.Trim(val, `"`)) } con, _ := NewContext(labels["file"]) con["level"] = fmt.Sprintf("s0:c%d,c%d", maxCategory-2, maxCategory-1) - reserveLabel(con.get()) - return labels, con.get() + privContainerMountLabel = con.get() + reserveLabel(privContainerMountLabel) +} + +func label(key string) string { + loadLabelsOnce.Do(func() { + loadLabels() + }) + return labels[key] } // kvmContainerLabels returns the default processLabel and mountLabel to be used // for kvm containers by the calling process. func kvmContainerLabels() (string, string) { - processLabel := labels["kvm_process"] + processLabel := label("kvm_process") if processLabel == "" { - processLabel = labels["process"] + processLabel = label("process") } - return addMcs(processLabel, labels["file"]) + return addMcs(processLabel, label("file")) } // initContainerLabels returns the default processLabel and file labels to be // used for containers running an init system like systemd by the calling process. 
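The loadLabelsOnce/label() pair above replaces load-at-import with load-on-first-use. The same shape in isolation, with a stand-in for the lxc_contexts parsing:

    package main

    import (
        "fmt"
        "sync"
    )

    var (
        labelsOnce sync.Once
        labelMap   map[string]string
    )

    func lookup(key string) string {
        labelsOnce.Do(func() {
            // Stand-in for parsing /etc/selinux/<type>/contexts/lxc_contexts.
            labelMap = map[string]string{
                "process": "system_u:system_r:container_t:s0",
                "file":    "system_u:object_r:container_file_t:s0",
            }
        })
        return labelMap[key]
    }

    func main() {
        fmt.Println(lookup("process"))
        fmt.Println(lookup("missing") == "") // true: missing keys read as empty
    }

The benefit over a package-level `var labels = loadLabels()` is that importers which never touch labels do not pay for, or fail on, reading the policy files.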
func initContainerLabels() (string, string) { - processLabel := labels["init_process"] + processLabel := label("init_process") if processLabel == "" { - processLabel = labels["process"] + processLabel = label("process") } - return addMcs(processLabel, labels["file"]) + return addMcs(processLabel, label("file")) } // containerLabels returns an allocated processLabel and fileLabel to be used for @@ -955,9 +1014,9 @@ func containerLabels() (processLabel string, fileLabel string) { return "", "" } - processLabel = labels["process"] - fileLabel = labels["file"] - readOnlyFileLabel = labels["ro_file"] + processLabel = label("process") + fileLabel = label("file") + readOnlyFileLabel = label("ro_file") if processLabel == "" || fileLabel == "" { return "", fileLabel @@ -985,7 +1044,7 @@ func addMcs(processLabel, fileLabel string) (string, string) { // securityCheckContext validates that the SELinux label is understood by the kernel func securityCheckContext(val string) error { - return ioutil.WriteFile(path.Join(getSelinuxMountPoint(), "context"), []byte(val), 0644) + return ioutil.WriteFile(path.Join(getSelinuxMountPoint(), "context"), []byte(val), 0o644) } // copyLevel returns a label with the MLS/MCS level from src label replaced on @@ -1014,21 +1073,6 @@ func copyLevel(src, dest string) (string, error) { return tcon.Get(), nil } -// Prevent users from relabeling system files -func badPrefix(fpath string) error { - if fpath == "" { - return ErrEmptyPath - } - - badPrefixes := []string{"/usr"} - for _, prefix := range badPrefixes { - if strings.HasPrefix(fpath, prefix) { - return errors.Errorf("relabeling content in %s is not allowed", prefix) - } - } - return nil -} - // chcon changes the fpath file object to the SELinux label label. // If fpath is a directory and recurse is true, then chcon walks the // directory tree setting the label. 
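The removed badPrefix check reappears, generalized, as the exclusion map in the chcon hunk that follows: relabeling system locations is refused outright. For illustration (not part of the patch), with a made-up container label:

    package main

    import (
        "fmt"

        selinux "github.com/opencontainers/selinux/go-selinux"
    )

    func main() {
        // On an SELinux-enabled host this returns
        // "SELinux relabeling of /usr is not allowed"; with SELinux
        // disabled (or an empty label) Chcon is a no-op returning nil.
        err := selinux.Chcon("/usr", "system_u:object_r:container_file_t:s0", true)
        fmt.Println(err)
    }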
@@ -1039,22 +1083,73 @@ func chcon(fpath string, label string, recurse bool) error { if label == "" { return nil } - if err := badPrefix(fpath); err != nil { - return err + + exclude_paths := map[string]bool{ + "/": true, + "/bin": true, + "/boot": true, + "/dev": true, + "/etc": true, + "/etc/passwd": true, + "/etc/pki": true, + "/etc/shadow": true, + "/home": true, + "/lib": true, + "/lib64": true, + "/media": true, + "/opt": true, + "/proc": true, + "/root": true, + "/run": true, + "/sbin": true, + "/srv": true, + "/sys": true, + "/tmp": true, + "/usr": true, + "/var": true, + "/var/lib": true, + "/var/log": true, + } + + if home := os.Getenv("HOME"); home != "" { + exclude_paths[home] = true + } + + if sudoUser := os.Getenv("SUDO_USER"); sudoUser != "" { + if usr, err := user.Lookup(sudoUser); err == nil { + exclude_paths[usr.HomeDir] = true + } } - if !recurse { - return SetFileLabel(fpath, label) + if fpath != "/" { + fpath = strings.TrimSuffix(fpath, "/") + } + if exclude_paths[fpath] { + return fmt.Errorf("SELinux relabeling of %s is not allowed", fpath) } - return pwalk.Walk(fpath, func(p string, info os.FileInfo, err error) error { - e := SetFileLabel(p, label) - // Walk a file tree can race with removal, so ignore ENOENT - if os.IsNotExist(errors.Cause(e)) { - return nil + if !recurse { + err := lSetFileLabel(fpath, label) + if err != nil { + // Check if file doesn't exist, must have been removed + if errors.Is(err, os.ErrNotExist) { + return nil + } + // Check if current label is correct on disk + flabel, nerr := lFileLabel(fpath) + if nerr == nil && flabel == label { + return nil + } + // Check if file doesn't exist, must have been removed + if errors.Is(nerr, os.ErrNotExist) { + return nil + } + return err } - return e - }) + return nil + } + + return rchcon(fpath, label) } // dupSecOpt takes an SELinux process label and returns security options that @@ -1072,7 +1167,8 @@ func dupSecOpt(src string) ([]string, error) { con["type"] == "" { return nil, nil } - dup := []string{"user:" + con["user"], + dup := []string{ + "user:" + con["user"], "role:" + con["role"], "type:" + con["type"], } @@ -1140,9 +1236,8 @@ func findUserInContext(context Context, r io.Reader, verifier func(string) error return outConn, nil } } - if err := scanner.Err(); err != nil { - return "", errors.Wrap(err, "failed to scan for context") + return "", fmt.Errorf("failed to scan for context: %w", err) } return "", nil @@ -1155,7 +1250,7 @@ func getDefaultContextFromReaders(c *defaultSECtx) (string, error) { context, err := newContext(c.scon) if err != nil { - return "", errors.Wrapf(err, "failed to create label for %s", c.scon) + return "", fmt.Errorf("failed to create label for %s: %w", c.scon, err) } // set so the verifier validates the matched context with the provided user and level. 
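Throughout this hunk, os.IsNotExist(errors.Cause(err)) from pkg/errors gives way to errors.Is from the standard library. The difference is that errors.Is follows %w wrapping chains, which is why the explicit Cause() unwrapping can be dropped; a quick demonstration:

    package main

    import (
        "errors"
        "fmt"
        "os"
    )

    func main() {
        _, err := os.Open("/no/such/file")
        wrapped := fmt.Errorf("reading contexts: %w", err)

        fmt.Println(errors.Is(wrapped, os.ErrNotExist)) // true: the %w chain is unwrapped
        fmt.Println(os.IsNotExist(wrapped))             // false: no unwrapping
    }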
@@ -1180,19 +1275,18 @@ func getDefaultContextFromReaders(c *defaultSECtx) (string, error) { return conn, nil } - return "", errors.Wrapf(ErrContextMissing, "context not found: %q", c.scon) + return "", fmt.Errorf("context %q not found: %w", c.scon, ErrContextMissing) } func getDefaultContextWithLevel(user, level, scon string) (string, error) { - userPath := filepath.Join(policyRoot, selinuxUsersDir, user) - defaultPath := filepath.Join(policyRoot, defaultContexts) - + userPath := filepath.Join(policyRoot(), selinuxUsersDir, user) fu, err := os.Open(userPath) if err != nil { return "", err } defer fu.Close() + defaultPath := filepath.Join(policyRoot(), defaultContexts) fd, err := os.Open(defaultPath) if err != nil { return "", err diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go index b7218a0b6..20d888031 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go @@ -1,9 +1,8 @@ +//go:build !linux // +build !linux package selinux -const privContainerMountLabel = "" - func setDisabled() { } @@ -19,10 +18,18 @@ func setFileLabel(fpath string, label string) error { return nil } +func lSetFileLabel(fpath string, label string) error { + return nil +} + func fileLabel(fpath string) (string, error) { return "", nil } +func lFileLabel(fpath string) (string, error) { + return "", nil +} + func setFSCreateLabel(label string) error { return nil } @@ -152,3 +159,7 @@ func disableSecOpt() []string { func getDefaultContextWithLevel(user, level, scon string) (string, error) { return "", nil } + +func label(_ string) string { + return "" +} diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/xattrs_linux.go b/vendor/github.com/opencontainers/selinux/go-selinux/xattrs_linux.go index 117c255be..9e473ca16 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/xattrs_linux.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/xattrs_linux.go @@ -10,7 +10,7 @@ func lgetxattr(path, attr string) ([]byte, error) { // Start with a 128 length byte array dest := make([]byte, 128) sz, errno := doLgetxattr(path, attr, dest) - for errno == unix.ERANGE { + for errno == unix.ERANGE { //nolint:errorlint // unix errors are bare // Buffer too small, use zero-sized buffer to get the actual size sz, errno = doLgetxattr(path, attr, []byte{}) if errno != nil { @@ -31,7 +31,40 @@ func lgetxattr(path, attr string) ([]byte, error) { func doLgetxattr(path, attr string, dest []byte) (int, error) { for { sz, err := unix.Lgetxattr(path, attr, dest) - if err != unix.EINTR { + if err != unix.EINTR { //nolint:errorlint // unix errors are bare + return sz, err + } + } +} + +// getxattr returns a []byte slice containing the value of +// an extended attribute attr set for path. 
+func getxattr(path, attr string) ([]byte, error) { + // Start with a 128 length byte array + dest := make([]byte, 128) + sz, errno := dogetxattr(path, attr, dest) + for errno == unix.ERANGE { //nolint:errorlint // unix errors are bare + // Buffer too small, use zero-sized buffer to get the actual size + sz, errno = dogetxattr(path, attr, []byte{}) + if errno != nil { + return nil, errno + } + + dest = make([]byte, sz) + sz, errno = dogetxattr(path, attr, dest) + } + if errno != nil { + return nil, errno + } + + return dest[:sz], nil +} + +// dogetxattr is a wrapper that retries on EINTR +func dogetxattr(path, attr string, dest []byte) (int, error) { + for { + sz, err := unix.Getxattr(path, attr, dest) + if err != unix.EINTR { //nolint:errorlint // unix errors are bare return sz, err } } diff --git a/vendor/github.com/opencontainers/selinux/pkg/pwalk/README.md b/vendor/github.com/opencontainers/selinux/pkg/pwalk/README.md index 16c4dfd3e..7e78dce01 100644 --- a/vendor/github.com/opencontainers/selinux/pkg/pwalk/README.md +++ b/vendor/github.com/opencontainers/selinux/pkg/pwalk/README.md @@ -8,6 +8,12 @@ By default, it utilizes 2\*runtime.NumCPU() goroutines for callbacks. This can be changed by using WalkN function which has the additional parameter, specifying the number of goroutines (concurrency). +### pwalk vs pwalkdir + +This package is deprecated in favor of +[pwalkdir](https://pkg.go.dev/github.com/opencontainers/selinux/pkg/pwalkdir), +which is faster, but requires at least Go 1.16. + ### Caveats Please note the following limitations of this code: diff --git a/vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go b/vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go index 437b12b3e..202c80da5 100644 --- a/vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go +++ b/vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go @@ -1,12 +1,11 @@ package pwalk import ( + "fmt" "os" "path/filepath" "runtime" "sync" - - "github.com/pkg/errors" ) type WalkFunc = filepath.WalkFunc @@ -20,7 +19,7 @@ type WalkFunc = filepath.WalkFunc // // Note that this implementation only supports primitive error handling: // -// - no errors are ever passed to WalkFn; +// - no errors are ever passed to walkFn; // // - once a walkFn returns any error, all further processing stops // and the error is returned to the caller of Walk; @@ -42,7 +41,7 @@ func Walk(root string, walkFn WalkFunc) error { func WalkN(root string, walkFn WalkFunc, num int) error { // make sure limit is sensible if num < 1 { - return errors.Errorf("walk(%q): num must be > 0", root) + return fmt.Errorf("walk(%q): num must be > 0", root) } files := make(chan *walkArgs, 2*num) @@ -52,6 +51,9 @@ func WalkN(root string, walkFn WalkFunc, num int) error { var ( err error wg sync.WaitGroup + + rootLen = len(root) + rootEntry *walkArgs ) wg.Add(1) go func() { @@ -60,6 +62,11 @@ func WalkN(root string, walkFn WalkFunc, num int) error { close(files) return err } + if len(p) == rootLen { + // Root entry is processed separately below. + rootEntry = &walkArgs{path: p, info: &info} + return nil + } // add a file to the queue unless a callback sent an error select { case e := <-errCh: @@ -93,10 +100,14 @@ func WalkN(root string, walkFn WalkFunc, num int) error { wg.Wait() + if err == nil { + err = walkFn(rootEntry.path, *rootEntry.info, nil) + } + return err } -// walkArgs holds the arguments that were passed to the Walk or WalkLimit +// walkArgs holds the arguments that were passed to the Walk or WalkN // functions. 
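getxattr/dogetxattr above combine two retry patterns: EINTR restarts and the grow-on-ERANGE buffer dance. The size-probe variant of the latter, as a sketch with made-up path and attribute names:

    package main

    import (
        "fmt"

        "golang.org/x/sys/unix"
    )

    func main() {
        path, attr := "/tmp/demo", "user.example" // hypothetical

        // A zero-sized buffer makes getxattr(2) report the value's size.
        sz, err := unix.Getxattr(path, attr, nil)
        if err != nil {
            fmt.Println(err)
            return
        }
        buf := make([]byte, sz)
        if sz, err = unix.Getxattr(path, attr, buf); err == nil {
            fmt.Printf("%s=%q\n", attr, buf[:sz])
        }
    }

The library's loop form additionally guards against the value growing between the probe and the fetch, which this two-step sketch does not.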
 type walkArgs struct {
 	path string
diff --git a/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/README.md b/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/README.md
new file mode 100644
index 000000000..068ac4005
--- /dev/null
+++ b/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/README.md
@@ -0,0 +1,54 @@
+## pwalkdir: parallel implementation of filepath.WalkDir
+
+This is a wrapper for [filepath.WalkDir](https://pkg.go.dev/path/filepath#WalkDir)
+which may speed it up by calling multiple callback functions (WalkDirFunc)
+in parallel, utilizing goroutines.
+
+By default, it utilizes 2\*runtime.NumCPU() goroutines for callbacks.
+This can be changed by using WalkN function which has the additional
+parameter, specifying the number of goroutines (concurrency).
+
+### pwalk vs pwalkdir
+
+This package is very similar to
+[pwalk](https://pkg.go.dev/github.com/opencontainers/selinux/pkg/pwalk),
+but utilizes `filepath.WalkDir` (added to Go 1.16), which does not call stat(2)
+on every entry and is therefore faster (up to 3x, depending on usage scenario).
+
+Users who are OK with requiring Go 1.16+ should switch to this
+implementation.
+
+### Caveats
+
+Please note the following limitations of this code:
+
+* Unlike filepath.WalkDir, the order of calls is non-deterministic;
+
+* Only primitive error handling is supported:
+
+  * fs.SkipDir is not supported;
+
+  * no errors are ever passed to WalkDirFunc;
+
+  * once any error is returned from any walkDirFunc instance, no more calls
+    to WalkDirFunc are made, and the error is returned to the caller of WalkDir;
+
+  * if more than one WalkDirFunc instance will return an error, only one
+    of such errors will be propagated to and returned by WalkDir, others
+    will be silently discarded.
+
+### Documentation
+
+For the official documentation, see
+https://pkg.go.dev/github.com/opencontainers/selinux/pkg/pwalkdir
+
+### Benchmarks
+
+For a WalkDirFunc that consists solely of the return statement, this
+implementation is about 15% slower than the standard library's
+filepath.WalkDir.
+
+Otherwise (if a WalkDirFunc is actually doing something) this is usually
+faster, except when the WalkN(..., 1) is used. Run `go test -bench .`
+to see how different operations can benefit from it, as well as how the
+level of parallelism affects the speed.
diff --git a/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go b/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go
new file mode 100644
index 000000000..a5796b2c4
--- /dev/null
+++ b/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go
@@ -0,0 +1,116 @@
+//go:build go1.16
+// +build go1.16
+
+package pwalkdir
+
+import (
+	"fmt"
+	"io/fs"
+	"path/filepath"
+	"runtime"
+	"sync"
+)
+
+// Walk is a wrapper for filepath.WalkDir which can call multiple walkFn
+// in parallel, allowing to handle each item concurrently. A maximum of
+// twice the runtime.NumCPU() walkFn will be called at any one time.
+// If you want to change the maximum, use WalkN instead.
+//
+// The order of calls is non-deterministic.
+// +// Note that this implementation only supports primitive error handling: +// +// - no errors are ever passed to walkFn; +// +// - once a walkFn returns any error, all further processing stops +// and the error is returned to the caller of Walk; +// +// - filepath.SkipDir is not supported; +// +// - if more than one walkFn instance will return an error, only one +// of such errors will be propagated and returned by Walk, others +// will be silently discarded. +func Walk(root string, walkFn fs.WalkDirFunc) error { + return WalkN(root, walkFn, runtime.NumCPU()*2) +} + +// WalkN is a wrapper for filepath.WalkDir which can call multiple walkFn +// in parallel, allowing to handle each item concurrently. A maximum of +// num walkFn will be called at any one time. +// +// Please see Walk documentation for caveats of using this function. +func WalkN(root string, walkFn fs.WalkDirFunc, num int) error { + // make sure limit is sensible + if num < 1 { + return fmt.Errorf("walk(%q): num must be > 0", root) + } + + files := make(chan *walkArgs, 2*num) + errCh := make(chan error, 1) // Get the first error, ignore others. + + // Start walking a tree asap. + var ( + err error + wg sync.WaitGroup + + rootLen = len(root) + rootEntry *walkArgs + ) + wg.Add(1) + go func() { + err = filepath.WalkDir(root, func(p string, entry fs.DirEntry, err error) error { + if err != nil { + close(files) + return err + } + if len(p) == rootLen { + // Root entry is processed separately below. + rootEntry = &walkArgs{path: p, entry: entry} + return nil + } + // Add a file to the queue unless a callback sent an error. + select { + case e := <-errCh: + close(files) + return e + default: + files <- &walkArgs{path: p, entry: entry} + return nil + } + }) + if err == nil { + close(files) + } + wg.Done() + }() + + wg.Add(num) + for i := 0; i < num; i++ { + go func() { + for file := range files { + if e := walkFn(file.path, file.entry, nil); e != nil { + select { + case errCh <- e: // sent ok + default: // buffer full + } + } + } + wg.Done() + }() + } + + wg.Wait() + + if err == nil { + err = walkFn(rootEntry.path, rootEntry.entry, nil) + } + + return err +} + +// walkArgs holds the arguments that were passed to the Walk or WalkN +// functions. 
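A usage sketch for the new package (not part of the patch): callbacks run concurrently, so shared state needs synchronization, and per the WalkN implementation above the root entry's callback runs last.

    package main

    import (
        "fmt"
        "io/fs"
        "sync/atomic"

        "github.com/opencontainers/selinux/pkg/pwalkdir"
    )

    func main() {
        var files int64
        err := pwalkdir.WalkN("/etc", func(p string, d fs.DirEntry, _ error) error {
            if !d.IsDir() {
                atomic.AddInt64(&files, 1) // callbacks are concurrent
            }
            return nil
        }, 4) // at most 4 concurrent callbacks
        fmt.Println(files, err)
    }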
+type walkArgs struct { + path string + entry fs.DirEntry +} diff --git a/vendor/github.com/seccomp/libseccomp-golang/.golangci.yml b/vendor/github.com/seccomp/libseccomp-golang/.golangci.yml new file mode 100644 index 000000000..7df8aa198 --- /dev/null +++ b/vendor/github.com/seccomp/libseccomp-golang/.golangci.yml @@ -0,0 +1,4 @@ +# For documentation, see https://golangci-lint.run/usage/configuration/ +linters: + enable: + - gofumpt diff --git a/vendor/github.com/seccomp/libseccomp-golang/.travis.yml b/vendor/github.com/seccomp/libseccomp-golang/.travis.yml deleted file mode 100644 index feef144d1..000000000 --- a/vendor/github.com/seccomp/libseccomp-golang/.travis.yml +++ /dev/null @@ -1,37 +0,0 @@ -# Travis CI configuration for libseccomp-golang - -# https://docs.travis-ci.com/user/reference/bionic -# https://wiki.ubuntu.com/Releases - -dist: bionic -sudo: false - -notifications: - email: - on_success: always - on_failure: always - -arch: - - amd64 - -os: - - linux - -language: go - -addons: - apt: - packages: - - build-essential - # TODO: use the main libseccomp git repo instead of a distro package - - libseccomp2 - - libseccomp-dev - -install: - - go get -u golang.org/x/lint/golint - -# run all of the tests independently, fail if any of the tests error -script: - - make check-syntax - - make lint - - make check diff --git a/vendor/github.com/seccomp/libseccomp-golang/CONTRIBUTING.md b/vendor/github.com/seccomp/libseccomp-golang/CONTRIBUTING.md index d6862cbd5..c2fc80d5a 100644 --- a/vendor/github.com/seccomp/libseccomp-golang/CONTRIBUTING.md +++ b/vendor/github.com/seccomp/libseccomp-golang/CONTRIBUTING.md @@ -1,31 +1,23 @@ -How to Submit Patches to the libseccomp Project +How to Submit Patches to the libseccomp-golang Project =============================================================================== https://github.com/seccomp/libseccomp-golang This document is intended to act as a guide to help you contribute to the -libseccomp project. It is not perfect, and there will always be exceptions -to the rules described here, but by following the instructions below you -should have a much easier time getting your work merged with the upstream +libseccomp-golang project. It is not perfect, and there will always be +exceptions to the rules described here, but by following the instructions below +you should have a much easier time getting your work merged with the upstream project. ## Test Your Code Using Existing Tests -There are two possible tests you can run to verify your code. The first -test is used to check the formatting and coding style of your changes, you -can run the test with the following command: - - # make check-syntax - -... if there are any problems with your changes a diff/patch will be shown -which indicates the problems and how to fix them. - -The second possible test is used to ensure the sanity of your code changes -and to test these changes against the included tests. You can run the test -with the following command: +A number of tests and lint related recipes are provided in the Makefile, if +you want to run the standard regression tests, you can execute the following: # make check -... if there are any faults or errors they will be displayed. 
+In order to use it, the 'golangci-lint' tool is needed, which can be found at: + +* https://github.com/golangci/golangci-lint ## Add New Tests for New Functionality diff --git a/vendor/github.com/seccomp/libseccomp-golang/Makefile b/vendor/github.com/seccomp/libseccomp-golang/Makefile index 1ff4cc898..530f5b4ad 100644 --- a/vendor/github.com/seccomp/libseccomp-golang/Makefile +++ b/vendor/github.com/seccomp/libseccomp-golang/Makefile @@ -4,7 +4,7 @@ all: check-build -check: vet test +check: lint test check-build: go build @@ -16,11 +16,16 @@ fix-syntax: gofmt -w . vet: - go vet -v + go vet -v ./... + +# Previous bugs have made the tests freeze until the timeout. Golang default +# timeout for tests is 10 minutes, which is too long, considering current tests +# can be executed in less than 1 second. Reduce the timeout, so problems can +# be noticed earlier in the CI. +TEST_TIMEOUT=10s test: - go test -v + go test -v -timeout $(TEST_TIMEOUT) lint: - @$(if $(shell which golint),true,$(error "install golint and include it in your PATH")) - golint -set_exit_status + golangci-lint run . diff --git a/vendor/github.com/seccomp/libseccomp-golang/README.md b/vendor/github.com/seccomp/libseccomp-golang/README.md index 27423f2d9..6430f1c9e 100644 --- a/vendor/github.com/seccomp/libseccomp-golang/README.md +++ b/vendor/github.com/seccomp/libseccomp-golang/README.md @@ -2,7 +2,9 @@ =============================================================================== https://github.com/seccomp/libseccomp-golang -[![Build Status](https://img.shields.io/travis/seccomp/libseccomp-golang/master.svg)](https://travis-ci.org/seccomp/libseccomp-golang) +[![Go Reference](https://pkg.go.dev/badge/github.com/seccomp/libseccomp-golang.svg)](https://pkg.go.dev/github.com/seccomp/libseccomp-golang) +[![validate](https://github.com/seccomp/libseccomp-golang/actions/workflows/validate.yml/badge.svg)](https://github.com/seccomp/libseccomp-golang/actions/workflows/validate.yml) +[![test](https://github.com/seccomp/libseccomp-golang/actions/workflows/test.yml/badge.svg)](https://github.com/seccomp/libseccomp-golang/actions/workflows/test.yml) The libseccomp library provides an easy to use, platform independent, interface to the Linux Kernel's syscall filtering mechanism. The libseccomp API is @@ -26,26 +28,14 @@ list. * https://groups.google.com/d/forum/libseccomp -Documentation is also available at: +Documentation for this package is also available at: -* https://godoc.org/github.com/seccomp/libseccomp-golang +* https://pkg.go.dev/github.com/seccomp/libseccomp-golang ## Installing the package -The libseccomp-golang bindings require at least Go v1.2.1 and GCC v4.8.4; -earlier versions may yield unpredictable results. If you meet these -requirements you can install this package using the command below: - # go get github.com/seccomp/libseccomp-golang -## Testing the Library - -A number of tests and lint related recipes are provided in the Makefile, if -you want to run the standard regression tests, you can excute the following: - - # make check - -In order to execute the 'make lint' recipe the 'golint' tool is needed, it -can be found at: +## Contributing -* https://github.com/golang/lint +See [CONTRIBUTING.md](CONTRIBUTING.md). 
diff --git a/vendor/github.com/seccomp/libseccomp-golang/SECURITY.md b/vendor/github.com/seccomp/libseccomp-golang/SECURITY.md
new file mode 100644
index 000000000..c448faa8e
--- /dev/null
+++ b/vendor/github.com/seccomp/libseccomp-golang/SECURITY.md
@@ -0,0 +1,47 @@
+The libseccomp-golang Security Vulnerability Handling Process
+===============================================================================
+https://github.com/seccomp/libseccomp-golang
+
+This document attempts to describe the processes through which
+sensitive security relevant bugs can be responsibly disclosed to the
+libseccomp-golang project and how the project maintainers should handle these
+reports.  Just like the other libseccomp-golang process documents, this
+document should be treated as a guiding document and not a hard, unyielding set
+of regulations; the bug reporters and project maintainers are encouraged to
+work together to address the issues as best they can, in a manner which works
+best for all parties involved.
+
+### Reporting Problems
+
+Problems with the libseccomp-golang library that are not suitable for immediate
+public disclosure should be emailed to the current libseccomp-golang
+maintainers; the list is below.  We typically request at most a 90 day time
+period to address the issue before it is made public, but we will make every
+effort to address the issue as quickly as possible and shorten the disclosure
+window.
+
+* Paul Moore, paul@paul-moore.com
+* Tom Hromatka, tom.hromatka@oracle.com
+
+### Resolving Sensitive Security Issues
+
+Upon disclosure of a bug, the maintainers should work together to investigate
+the problem and decide on a solution.  In order to prevent an early disclosure
+of the problem, those working on the solution should do so privately and
+outside of the traditional libseccomp-golang development practices.  One
+possible solution to this is to leverage the GitHub "Security" functionality to
+create a private development fork that can be shared among the maintainers, and
+optionally the reporter.  A placeholder GitHub issue may be created, but
+details should remain extremely limited until such time as the problem has been
+fixed and responsibly disclosed.  If a CVE, or other tag, has been assigned to
+the problem, the GitHub issue title should include the vulnerability tag once
+the problem has been disclosed.
+
+### Public Disclosure
+
+Whenever possible, responsible reporting and patching practices should be
+followed, including notification to the linux-distros and oss-security mailing
+lists.
+ +* https://oss-security.openwall.org/wiki/mailing-lists/distros +* https://oss-security.openwall.org/wiki/mailing-lists/oss-security diff --git a/vendor/github.com/seccomp/libseccomp-golang/go.sum b/vendor/github.com/seccomp/libseccomp-golang/go.sum index 72ae16111..e69de29bb 100644 --- a/vendor/github.com/seccomp/libseccomp-golang/go.sum +++ b/vendor/github.com/seccomp/libseccomp-golang/go.sum @@ -1,23 +0,0 @@ -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7 h1:EBZoQjiKKPaLbPrbpssUfuHtwM6KV/vb4U85g/cigFY= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200313205530-4303120df7d8 h1:gkI/wGGwpcG5W4hLCzZNGxA4wzWBGGDStRI1MrjDl2Q= -golang.org/x/tools v0.0.0-20200313205530-4303120df7d8/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/github.com/seccomp/libseccomp-golang/seccomp.go b/vendor/github.com/seccomp/libseccomp-golang/seccomp.go index e489b9ebd..8dad12fdb 100644 --- a/vendor/github.com/seccomp/libseccomp-golang/seccomp.go +++ b/vendor/github.com/seccomp/libseccomp-golang/seccomp.go @@ -1,5 +1,3 @@ -// +build linux - // Public API specification for libseccomp Go bindings // Contains public API for the bindings @@ -18,35 +16,36 @@ import ( "unsafe" ) -// C wrapping code - -// #cgo pkg-config: libseccomp // #include // #include import "C" // Exported types -// VersionError denotes that the system libseccomp version is incompatible -// with this package. 
+// VersionError represents an error when either the system libseccomp version +// or the kernel version is too old to perform the operation requested. type VersionError struct { - message string - minimum string + op string // operation that failed or would fail + major, minor, micro uint // minimally required libseccomp version + curAPI, minAPI uint // current and minimally required API versions +} + +func init() { + // This forces the cgo libseccomp to initialize its internal API support state, + // which is necessary on older versions of libseccomp in order to work + // correctly. + _, _ = getAPI() } func (e VersionError) Error() string { - format := "Libseccomp version too low: " - if e.message != "" { - format += e.message + ": " + if e.minAPI != 0 { + return fmt.Sprintf("%s requires libseccomp >= %d.%d.%d and API level >= %d "+ + "(current version: %d.%d.%d, API level: %d)", + e.op, e.major, e.minor, e.micro, e.minAPI, + verMajor, verMinor, verMicro, e.curAPI) } - format += "minimum supported is " - if e.minimum != "" { - format += e.minimum + ": " - } else { - format += "2.2.0: " - } - format += "detected %d.%d.%d" - return fmt.Sprintf(format, verMajor, verMinor, verMicro) + return fmt.Sprintf("%s requires libseccomp >= %d.%d.%d (current version: %d.%d.%d)", + e.op, e.major, e.minor, e.micro, verMajor, verMinor, verMicro) } // ScmpArch represents a CPU architecture. Seccomp can restrict syscalls on a @@ -69,9 +68,61 @@ type ScmpCondition struct { Operand2 uint64 `json:"operand_two,omitempty"` } -// ScmpSyscall represents a Linux System Call +// Seccomp userspace notification structures associated with filters that use the ActNotify action. + +// ScmpSyscall identifies a Linux System Call by its number. type ScmpSyscall int32 +// ScmpFd represents a file-descriptor used for seccomp userspace notifications. +type ScmpFd int32 + +// ScmpNotifData describes the system call context that triggered a notification. +// +// Syscall: the syscall number +// Arch: the filter architecture +// InstrPointer: address of the instruction that triggered a notification +// Args: arguments (up to 6) for the syscall +// +type ScmpNotifData struct { + Syscall ScmpSyscall `json:"syscall,omitempty"` + Arch ScmpArch `json:"arch,omitempty"` + InstrPointer uint64 `json:"instr_pointer,omitempty"` + Args []uint64 `json:"args,omitempty"` +} + +// ScmpNotifReq represents a seccomp userspace notification. See NotifReceive() for +// info on how to pull such a notification. +// +// ID: notification ID +// Pid: process that triggered the notification event +// Flags: filter flags (see seccomp(2)) +// Data: system call context that triggered the notification +// +type ScmpNotifReq struct { + ID uint64 `json:"id,omitempty"` + Pid uint32 `json:"pid,omitempty"` + Flags uint32 `json:"flags,omitempty"` + Data ScmpNotifData `json:"data,omitempty"` +} + +// ScmpNotifResp represents a seccomp userspace notification response. See NotifRespond() +// for info on how to push such a response. +// +// ID: notification ID (must match the corresponding ScmpNotifReq ID) +// Error: must be 0 if no error occurred, or an error constant from package +// syscall (e.g., syscall.EPERM, etc). In the latter case, it's used +// as an error return from the syscall that created the notification. +// Val: return value for the syscall that created the notification. Only +// relevant if Error is 0. 
+// Flags: userspace notification response flag (e.g., NotifRespFlagContinue) +// +type ScmpNotifResp struct { + ID uint64 `json:"id,omitempty"` + Error int32 `json:"error,omitempty"` + Val uint64 `json:"val,omitempty"` + Flags uint32 `json:"flags,omitempty"` +} + // Exported Constants const ( @@ -83,40 +134,46 @@ const ( // variables are invalid ArchInvalid ScmpArch = iota // ArchNative is the native architecture of the kernel - ArchNative ScmpArch = iota + ArchNative // ArchX86 represents 32-bit x86 syscalls - ArchX86 ScmpArch = iota + ArchX86 // ArchAMD64 represents 64-bit x86-64 syscalls - ArchAMD64 ScmpArch = iota + ArchAMD64 // ArchX32 represents 64-bit x86-64 syscalls (32-bit pointers) - ArchX32 ScmpArch = iota + ArchX32 // ArchARM represents 32-bit ARM syscalls - ArchARM ScmpArch = iota + ArchARM // ArchARM64 represents 64-bit ARM syscalls - ArchARM64 ScmpArch = iota + ArchARM64 // ArchMIPS represents 32-bit MIPS syscalls - ArchMIPS ScmpArch = iota + ArchMIPS // ArchMIPS64 represents 64-bit MIPS syscalls - ArchMIPS64 ScmpArch = iota + ArchMIPS64 // ArchMIPS64N32 represents 64-bit MIPS syscalls (32-bit pointers) - ArchMIPS64N32 ScmpArch = iota + ArchMIPS64N32 // ArchMIPSEL represents 32-bit MIPS syscalls (little endian) - ArchMIPSEL ScmpArch = iota + ArchMIPSEL // ArchMIPSEL64 represents 64-bit MIPS syscalls (little endian) - ArchMIPSEL64 ScmpArch = iota + ArchMIPSEL64 // ArchMIPSEL64N32 represents 64-bit MIPS syscalls (little endian, // 32-bit pointers) - ArchMIPSEL64N32 ScmpArch = iota + ArchMIPSEL64N32 // ArchPPC represents 32-bit POWERPC syscalls - ArchPPC ScmpArch = iota + ArchPPC // ArchPPC64 represents 64-bit POWER syscalls (big endian) - ArchPPC64 ScmpArch = iota + ArchPPC64 // ArchPPC64LE represents 64-bit POWER syscalls (little endian) - ArchPPC64LE ScmpArch = iota + ArchPPC64LE // ArchS390 represents 31-bit System z/390 syscalls - ArchS390 ScmpArch = iota + ArchS390 // ArchS390X represents 64-bit System z/390 syscalls - ArchS390X ScmpArch = iota + ArchS390X + // ArchPARISC represents 32-bit PA-RISC + ArchPARISC + // ArchPARISC64 represents 64-bit PA-RISC + ArchPARISC64 + // ArchRISCV64 represents RISCV64 + ArchRISCV64 ) const ( @@ -125,31 +182,36 @@ const ( // ActInvalid is a placeholder to ensure uninitialized ScmpAction // variables are invalid ActInvalid ScmpAction = iota - // ActKill kills the thread that violated the rule. It is the same as ActKillThread. + // ActKillThread kills the thread that violated the rule. // All other threads from the same thread group will continue to execute. - ActKill ScmpAction = iota + ActKillThread // ActTrap throws SIGSYS - ActTrap ScmpAction = iota + ActTrap + // ActNotify triggers a userspace notification. This action is only usable when + // libseccomp API level 6 or higher is supported. + ActNotify // ActErrno causes the syscall to return a negative error code. This // code can be set with the SetReturnCode method - ActErrno ScmpAction = iota + ActErrno // ActTrace causes the syscall to notify tracing processes with the // given error code. This code can be set with the SetReturnCode method - ActTrace ScmpAction = iota + ActTrace // ActAllow permits the syscall to continue execution - ActAllow ScmpAction = iota + ActAllow // ActLog permits the syscall to continue execution after logging it. // This action is only usable when libseccomp API level 3 or higher is // supported. - ActLog ScmpAction = iota - // ActKillThread kills the thread that violated the rule. It is the same as ActKill. 
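The newly added architectures are reachable through the existing string round-trip helpers. A small check (the bindings require the libseccomp C library at runtime):

    package main

    import (
        "fmt"

        seccomp "github.com/seccomp/libseccomp-golang"
    )

    func main() {
        arch, err := seccomp.GetArchFromString("riscv64")
        if err != nil {
            fmt.Println(err)
            return
        }
        fmt.Println(arch == seccomp.ArchRISCV64, arch) // true riscv64
    }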
- // All other threads from the same thread group will continue to execute. - ActKillThread ScmpAction = iota + ActLog // ActKillProcess kills the process that violated the rule. // All threads in the thread group are also terminated. // This action is only usable when libseccomp API level 3 or higher is // supported. - ActKillProcess ScmpAction = iota + ActKillProcess + // ActKill kills the thread that violated the rule. + // All other threads from the same thread group will continue to execute. + // + // Deprecated: use ActKillThread + ActKill = ActKillThread ) const ( @@ -162,23 +224,37 @@ const ( CompareInvalid ScmpCompareOp = iota // CompareNotEqual returns true if the argument is not equal to the // given value - CompareNotEqual ScmpCompareOp = iota + CompareNotEqual // CompareLess returns true if the argument is less than the given value - CompareLess ScmpCompareOp = iota + CompareLess // CompareLessOrEqual returns true if the argument is less than or equal // to the given value - CompareLessOrEqual ScmpCompareOp = iota + CompareLessOrEqual // CompareEqual returns true if the argument is equal to the given value - CompareEqual ScmpCompareOp = iota + CompareEqual // CompareGreaterEqual returns true if the argument is greater than or // equal to the given value - CompareGreaterEqual ScmpCompareOp = iota + CompareGreaterEqual // CompareGreater returns true if the argument is greater than the given // value - CompareGreater ScmpCompareOp = iota - // CompareMaskedEqual returns true if the argument is equal to the given - // value, when masked (bitwise &) against the second given value - CompareMaskedEqual ScmpCompareOp = iota + CompareGreater + // CompareMaskedEqual returns true if the masked argument value is + // equal to the masked datum value. Mask is the first argument, and + // datum is the second one. + CompareMaskedEqual +) + +// ErrSyscallDoesNotExist represents an error condition where +// libseccomp is unable to resolve the syscall +var ErrSyscallDoesNotExist = fmt.Errorf("could not resolve syscall name") + +const ( + // Userspace notification response flags + + // NotifRespFlagContinue tells the kernel to continue executing the system + // call that triggered the notification. Must only be used when the notification + // response's error is 0. 
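With the ErrSyscallDoesNotExist sentinel introduced above, lookup failures become matchable with errors.Is instead of string comparison; for example (assuming libseccomp is installed, and using a made-up syscall name):

    package main

    import (
        "errors"
        "fmt"

        seccomp "github.com/seccomp/libseccomp-golang"
    )

    func main() {
        _, err := seccomp.GetSyscallFromName("no_such_syscall")
        fmt.Println(errors.Is(err, seccomp.ErrSyscallDoesNotExist)) // true
    }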
+ NotifRespFlagContinue uint32 = 1 ) // Helpers for types @@ -223,6 +299,12 @@ func GetArchFromString(arch string) (ScmpArch, error) { return ArchS390, nil case "s390x": return ArchS390X, nil + case "parisc": + return ArchPARISC, nil + case "parisc64": + return ArchPARISC64, nil + case "riscv64": + return ArchRISCV64, nil default: return ArchInvalid, fmt.Errorf("cannot convert unrecognized string %q", arch) } @@ -263,6 +345,12 @@ func (a ScmpArch) String() string { return "s390" case ArchS390X: return "s390x" + case ArchPARISC: + return "parisc" + case ArchPARISC64: + return "parisc64" + case ArchRISCV64: + return "riscv64" case ArchNative: return "native" case ArchInvalid: @@ -299,7 +387,7 @@ func (a ScmpCompareOp) String() string { // String returns a string representation of a seccomp match action func (a ScmpAction) String() string { switch a & 0xFFFF { - case ActKill, ActKillThread: + case ActKillThread: return "Action: Kill thread" case ActKillProcess: return "Action: Kill process" @@ -310,6 +398,8 @@ func (a ScmpAction) String() string { case ActTrace: return fmt.Sprintf("Action: Notify tracing processes with code %d", (a >> 16)) + case ActNotify: + return "Action: Notify userspace" case ActLog: return "Action: Log system call" case ActAllow: @@ -349,7 +439,7 @@ func GetLibraryVersion() (major, minor, micro uint) { // Returns a positive int containing the API level, or 0 with an error if the // API level could not be detected due to the library being older than v2.4.0. // See the seccomp_api_get(3) man page for details on available API levels: -// https://github.com/seccomp/libseccomp/blob/master/doc/man/man3/seccomp_api_get.3 +// https://github.com/seccomp/libseccomp/blob/main/doc/man/man3/seccomp_api_get.3 func GetAPI() (uint, error) { return getAPI() } @@ -359,7 +449,7 @@ func GetAPI() (uint, error) { // Returns an error if the API level could not be set. 
An error is always // returned if the library is older than v2.4.0 // See the seccomp_api_get(3) man page for details on available API levels: -// https://github.com/seccomp/libseccomp/blob/master/doc/man/man3/seccomp_api_get.3 +// https://github.com/seccomp/libseccomp/blob/main/doc/man/man3/seccomp_api_get.3 func SetAPI(api uint) error { return setAPI(api) } @@ -386,7 +476,7 @@ func (s ScmpSyscall) GetNameByArch(arch ScmpArch) (string, error) { cString := C.seccomp_syscall_resolve_num_arch(arch.toNative(), C.int(s)) if cString == nil { - return "", fmt.Errorf("could not resolve syscall name for %#x", int32(s)) + return "", ErrSyscallDoesNotExist } defer C.free(unsafe.Pointer(cString)) @@ -409,7 +499,7 @@ func GetSyscallFromName(name string) (ScmpSyscall, error) { result := C.seccomp_syscall_resolve_name(cString) if result == scmpError { - return 0, fmt.Errorf("could not resolve name to syscall: %q", name) + return 0, ErrSyscallDoesNotExist } return ScmpSyscall(result), nil @@ -433,7 +523,7 @@ func GetSyscallFromNameByArch(name string, arch ScmpArch) (ScmpSyscall, error) { result := C.seccomp_syscall_resolve_name_arch(arch.toNative(), cString) if result == scmpError { - return 0, fmt.Errorf("could not resolve name to syscall: %q on %v", name, arch) + return 0, ErrSyscallDoesNotExist } return ScmpSyscall(result), nil @@ -459,8 +549,8 @@ func MakeCondition(arg uint, comparison ScmpCompareOp, values ...uint64) (ScmpCo return condStruct, err } - if comparison == CompareInvalid { - return condStruct, fmt.Errorf("invalid comparison operator") + if err := sanitizeCompareOp(comparison); err != nil { + return condStruct, err } else if arg > 5 { return condStruct, fmt.Errorf("syscalls only have up to 6 arguments (%d given)", arg) } else if len(values) > 2 { @@ -506,11 +596,10 @@ type ScmpFilter struct { lock sync.Mutex } -// NewFilter creates and returns a new filter context. -// Accepts a default action to be taken for syscalls which match no rules in -// the filter. -// Returns a reference to a valid filter context, or nil and an error if the -// filter context could not be created or an invalid default action was given. +// NewFilter creates and returns a new filter context. Accepts a default action to be +// taken for syscalls which match no rules in the filter. +// Returns a reference to a valid filter context, or nil and an error +// if the filter context could not be created or an invalid default action was given. func NewFilter(defaultAction ScmpAction) (*ScmpFilter, error) { if err := ensureSupportedVersion(); err != nil { return nil, err @@ -530,8 +619,8 @@ func NewFilter(defaultAction ScmpAction) (*ScmpFilter, error) { filter.valid = true runtime.SetFinalizer(filter, filterFinalizer) - // Enable TSync so all goroutines will receive the same rules - // If the kernel does not support TSYNC, allow us to continue without error + // Enable TSync so all goroutines will receive the same rules. + // If the kernel does not support TSYNC, allow us to continue without error. 
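A condensed sketch of the condition APIs touched above, building one conditional rule; the syscall choice and return code are made up, and SetReturnCode is the method the doc comments here refer to:

    package main

    import (
        "fmt"

        seccomp "github.com/seccomp/libseccomp-golang"
    )

    func main() {
        filter, err := seccomp.NewFilter(seccomp.ActAllow)
        if err != nil {
            fmt.Println(err)
            return
        }
        defer filter.Release()

        call, err := seccomp.GetSyscallFromName("personality")
        if err != nil {
            fmt.Println(err)
            return
        }
        // Fail personality(2) with EPERM (1) unless argument 0 is zero.
        cond, err := seccomp.MakeCondition(0, seccomp.CompareNotEqual, 0)
        if err != nil {
            fmt.Println(err)
            return
        }
        action := seccomp.ActErrno.SetReturnCode(1)
        fmt.Println(filter.AddRuleConditional(call, action, []seccomp.ScmpCondition{cond}))
    }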
if err := filter.setFilterAttr(filterAttrTsync, 0x1); err != nil && err != syscall.ENOTSUP { filter.Release() return nil, fmt.Errorf("could not create filter - error setting tsync bit: %v", err) @@ -778,9 +867,8 @@ func (f *ScmpFilter) GetNoNewPrivsBit() (bool, error) { func (f *ScmpFilter) GetLogBit() (bool, error) { log, err := f.getFilterAttr(filterAttrLog) if err != nil { - api, apiErr := getAPI() - if (apiErr != nil && api == 0) || (apiErr == nil && api < 3) { - return false, fmt.Errorf("getting the log bit is only supported in libseccomp 2.4.0 and newer with API level 3 or higher") + if e := checkAPI("GetLogBit", 3, 2, 4, 0); e != nil { + err = e } return false, err @@ -793,6 +881,65 @@ func (f *ScmpFilter) GetLogBit() (bool, error) { return true, nil } +// GetSSB returns the current state the SSB bit will be set to on the filter +// being loaded, or an error if an issue was encountered retrieving the value. +// The SSB bit tells the kernel that a seccomp user is not interested in enabling +// Speculative Store Bypass mitigation. +// The SSB bit is only usable when libseccomp API level 4 or higher is +// supported. +func (f *ScmpFilter) GetSSB() (bool, error) { + ssb, err := f.getFilterAttr(filterAttrSSB) + if err != nil { + if e := checkAPI("GetSSB", 4, 2, 5, 0); e != nil { + err = e + } + + return false, err + } + + if ssb == 0 { + return false, nil + } + + return true, nil +} + +// GetOptimize returns the current optimization level of the filter, +// or an error if an issue was encountered retrieving the value. +// See SetOptimize for more details. +func (f *ScmpFilter) GetOptimize() (int, error) { + level, err := f.getFilterAttr(filterAttrOptimize) + if err != nil { + if e := checkAPI("GetOptimize", 4, 2, 5, 0); e != nil { + err = e + } + + return 0, err + } + + return int(level), nil +} + +// GetRawRC returns the current state of RawRC flag, or an error +// if an issue was encountered retrieving the value. +// See SetRawRC for more details. +func (f *ScmpFilter) GetRawRC() (bool, error) { + rawrc, err := f.getFilterAttr(filterAttrRawRC) + if err != nil { + if e := checkAPI("GetRawRC", 4, 2, 5, 0); e != nil { + err = e + } + + return false, err + } + + if rawrc == 0 { + return false, nil + } + + return true, nil +} + // SetBadArchAction sets the default action taken on a syscall for an // architecture not in the filter, or an error if an issue was encountered // setting the value. @@ -832,9 +979,73 @@ func (f *ScmpFilter) SetLogBit(state bool) error { err := f.setFilterAttr(filterAttrLog, toSet) if err != nil { - api, apiErr := getAPI() - if (apiErr != nil && api == 0) || (apiErr == nil && api < 3) { - return fmt.Errorf("setting the log bit is only supported in libseccomp 2.4.0 and newer with API level 3 or higher") + if e := checkAPI("SetLogBit", 3, 2, 4, 0); e != nil { + err = e + } + } + + return err +} + +// SetSSB sets the state of the SSB bit, which will be applied on filter +// load, or an error if an issue was encountered setting the value. +// The SSB bit is only usable when libseccomp API level 4 or higher is +// supported. +func (f *ScmpFilter) SetSSB(state bool) error { + var toSet C.uint32_t = 0x0 + + if state { + toSet = 0x1 + } + + err := f.setFilterAttr(filterAttrSSB, toSet) + if err != nil { + if e := checkAPI("SetSSB", 4, 2, 5, 0); e != nil { + err = e + } + } + + return err +} + +// SetOptimize sets optimization level of the seccomp filter. By default +// libseccomp generates a set of sequential "if" statements for each rule in +// the filter. 
SetSyscallPriority can be used to prioritize the order for the +// default case. The binary tree optimization sorts by syscall numbers and +// generates consistent O(log n) filter traversal for every rule in the filter. +// The binary tree may be advantageous for large filters. Note that +// SetSyscallPriority is ignored when level == 2. +// +// The different optimization levels are: +// 0: Reserved value, not currently used. +// 1: Rules sorted by priority and complexity (DEFAULT). +// 2: Binary tree sorted by syscall number. +func (f *ScmpFilter) SetOptimize(level int) error { + cLevel := C.uint32_t(level) + + err := f.setFilterAttr(filterAttrOptimize, cLevel) + if err != nil { + if e := checkAPI("SetOptimize", 4, 2, 5, 0); e != nil { + err = e + } + } + + return err +} + +// SetRawRC sets whether libseccomp should pass system error codes back to the +// caller, instead of the default ECANCELED. Defaults to false. +func (f *ScmpFilter) SetRawRC(state bool) error { + var toSet C.uint32_t = 0x0 + + if state { + toSet = 0x1 + } + + err := f.setFilterAttr(filterAttrRawRC, toSet) + if err != nil { + if e := checkAPI("SetRawRC", 4, 2, 5, 0); e != nil { + err = e } } @@ -885,9 +1096,6 @@ func (f *ScmpFilter) AddRuleExact(call ScmpSyscall, action ScmpAction) error { // AddRuleConditional adds a single rule for a conditional action on a syscall. // Returns an error if an issue was encountered adding the rule. // All conditions must match for the rule to match. -// There is a bug in library versions below v2.2.1 which can, in some cases, -// cause conditions to be lost when more than one are used. Consequently, -// AddRuleConditional is disabled on library versions lower than v2.2.1 func (f *ScmpFilter) AddRuleConditional(call ScmpSyscall, action ScmpAction, conds []ScmpCondition) error { return f.addRuleGeneric(call, action, false, conds) } @@ -899,9 +1107,6 @@ func (f *ScmpFilter) AddRuleConditional(call ScmpSyscall, action ScmpAction, con // The rule will function exactly as described, but it may not function identically // (or be able to be applied to) all architectures. // Returns an error if an issue was encountered adding the rule. -// There is a bug in library versions below v2.2.1 which can, in some cases, -// cause conditions to be lost when more than one are used. Consequently, -// AddRuleConditionalExact is disabled on library versions lower than v2.2.1 func (f *ScmpFilter) AddRuleConditionalExact(call ScmpSyscall, action ScmpAction, conds []ScmpCondition) error { return f.addRuleGeneric(call, action, true, conds) } @@ -947,3 +1152,36 @@ func (f *ScmpFilter) ExportBPF(file *os.File) error { return nil } + +// Userspace Notification API + +// GetNotifFd returns the userspace notification file descriptor associated with the given +// filter context. Such a file descriptor is only valid after the filter has been loaded +// and only when the filter uses the ActNotify action. The file descriptor can be used to +// retrieve and respond to notifications associated with the filter (see NotifReceive(), +// NotifRespond(), and NotifIDValid()). +func (f *ScmpFilter) GetNotifFd() (ScmpFd, error) { + return f.getNotifFd() +} + +// NotifReceive retrieves a seccomp userspace notification from a filter whose ActNotify +// action has triggered. The caller is expected to process the notification and return a +// response via NotifRespond(). Each invocation of this function returns one +// notification.
As multiple notifications may be pending at any time, this function is +// normally called within a polling loop. +func NotifReceive(fd ScmpFd) (*ScmpNotifReq, error) { + return notifReceive(fd) +} + +// NotifRespond responds to a notification retrieved via NotifReceive(). The response Id +// must match that of the corresponding notification retrieved via NotifReceive(). +func NotifRespond(fd ScmpFd, scmpResp *ScmpNotifResp) error { + return notifRespond(fd, scmpResp) +} + +// NotifIDValid checks if a notification is still valid. A return value of nil means the +// notification is still valid. Otherwise the notification is not valid. This can be used +// to mitigate time-of-check-time-of-use (TOCTOU) attacks as described in seccomp_notify_id_valid(2). +func NotifIDValid(fd ScmpFd, id uint64) error { + return notifIDValid(fd, id) +} diff --git a/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go b/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go index 0982e930f..df4dfb7eb 100644 --- a/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go +++ b/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go @@ -1,11 +1,10 @@ -// +build linux - // Internal functions for libseccomp Go bindings // No exported functions package seccomp import ( + "errors" "fmt" "syscall" ) @@ -14,16 +13,23 @@ import ( // Get the seccomp header in scope // Need stdlib.h for free() on cstrings +// To compile libseccomp-golang against a specific version of libseccomp: +// cd ../libseccomp && mkdir -p prefix +// ./configure --prefix=$PWD/prefix && make && make install +// cd ../libseccomp-golang +// PKG_CONFIG_PATH=$PWD/../libseccomp/prefix/lib/pkgconfig/ make +// LD_PRELOAD=$PWD/../libseccomp/prefix/lib/libseccomp.so.2.5.0 PKG_CONFIG_PATH=$PWD/../libseccomp/prefix/lib/pkgconfig/ make test + // #cgo pkg-config: libseccomp /* #include <errno.h> #include <stdlib.h> #include <seccomp.h> -#if SCMP_VER_MAJOR < 2 -#error Minimum supported version of Libseccomp is v2.2.0 -#elif SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 2 -#error Minimum supported version of Libseccomp is v2.2.0 +#if (SCMP_VER_MAJOR < 2) || \ + (SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 3) || \ + (SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR == 3 && SCMP_VER_MICRO < 1) +#error This package requires libseccomp >= v2.3.1 #endif #define ARCH_BAD ~0 @@ -50,6 +56,18 @@ const uint32_t C_ARCH_BAD = ARCH_BAD; #define SCMP_ARCH_S390X ARCH_BAD #endif +#ifndef SCMP_ARCH_PARISC +#define SCMP_ARCH_PARISC ARCH_BAD +#endif + +#ifndef SCMP_ARCH_PARISC64 +#define SCMP_ARCH_PARISC64 ARCH_BAD +#endif + +#ifndef SCMP_ARCH_RISCV64 +#define SCMP_ARCH_RISCV64 ARCH_BAD +#endif + const uint32_t C_ARCH_NATIVE = SCMP_ARCH_NATIVE; const uint32_t C_ARCH_X86 = SCMP_ARCH_X86; const uint32_t C_ARCH_X86_64 = SCMP_ARCH_X86_64; @@ -67,6 +85,9 @@ const uint32_t C_ARCH_PPC64 = SCMP_ARCH_PPC64; const uint32_t C_ARCH_PPC64LE = SCMP_ARCH_PPC64LE; const uint32_t C_ARCH_S390 = SCMP_ARCH_S390; const uint32_t C_ARCH_S390X = SCMP_ARCH_S390X; +const uint32_t C_ARCH_PARISC = SCMP_ARCH_PARISC; +const uint32_t C_ARCH_PARISC64 = SCMP_ARCH_PARISC64; +const uint32_t C_ARCH_RISCV64 = SCMP_ARCH_RISCV64; #ifndef SCMP_ACT_LOG #define SCMP_ACT_LOG 0x7ffc0000U @@ -80,6 +101,10 @@ const uint32_t C_ARCH_S390X = SCMP_ARCH_S390X; #define SCMP_ACT_KILL_THREAD 0x00000000U #endif +#ifndef SCMP_ACT_NOTIFY +#define SCMP_ACT_NOTIFY 0x7fc00000U +#endif + const uint32_t C_ACT_KILL = SCMP_ACT_KILL; const uint32_t C_ACT_KILL_PROCESS = SCMP_ACT_KILL_PROCESS; const uint32_t C_ACT_KILL_THREAD = SCMP_ACT_KILL_THREAD; @@ -88,19 +113,29
@@ const uint32_t C_ACT_ERRNO = SCMP_ACT_ERRNO(0); const uint32_t C_ACT_TRACE = SCMP_ACT_TRACE(0); const uint32_t C_ACT_LOG = SCMP_ACT_LOG; const uint32_t C_ACT_ALLOW = SCMP_ACT_ALLOW; +const uint32_t C_ACT_NOTIFY = SCMP_ACT_NOTIFY; // The libseccomp SCMP_FLTATR_CTL_LOG member of the scmp_filter_attr enum was // added in v2.4.0 -#if (SCMP_VER_MAJOR < 2) || \ - (SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 4) +#if SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 4 #define SCMP_FLTATR_CTL_LOG _SCMP_FLTATR_MIN #endif -const uint32_t C_ATTRIBUTE_DEFAULT = (uint32_t)SCMP_FLTATR_ACT_DEFAULT; -const uint32_t C_ATTRIBUTE_BADARCH = (uint32_t)SCMP_FLTATR_ACT_BADARCH; -const uint32_t C_ATTRIBUTE_NNP = (uint32_t)SCMP_FLTATR_CTL_NNP; -const uint32_t C_ATTRIBUTE_TSYNC = (uint32_t)SCMP_FLTATR_CTL_TSYNC; -const uint32_t C_ATTRIBUTE_LOG = (uint32_t)SCMP_FLTATR_CTL_LOG; +// The following SCMP_FLTATR_* were added in libseccomp v2.5.0. +#if SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 5 +#define SCMP_FLTATR_CTL_SSB _SCMP_FLTATR_MIN +#define SCMP_FLTATR_CTL_OPTIMIZE _SCMP_FLTATR_MIN +#define SCMP_FLTATR_API_SYSRAWRC _SCMP_FLTATR_MIN +#endif + +const uint32_t C_ATTRIBUTE_DEFAULT = (uint32_t)SCMP_FLTATR_ACT_DEFAULT; +const uint32_t C_ATTRIBUTE_BADARCH = (uint32_t)SCMP_FLTATR_ACT_BADARCH; +const uint32_t C_ATTRIBUTE_NNP = (uint32_t)SCMP_FLTATR_CTL_NNP; +const uint32_t C_ATTRIBUTE_TSYNC = (uint32_t)SCMP_FLTATR_CTL_TSYNC; +const uint32_t C_ATTRIBUTE_LOG = (uint32_t)SCMP_FLTATR_CTL_LOG; +const uint32_t C_ATTRIBUTE_SSB = (uint32_t)SCMP_FLTATR_CTL_SSB; +const uint32_t C_ATTRIBUTE_OPTIMIZE = (uint32_t)SCMP_FLTATR_CTL_OPTIMIZE; +const uint32_t C_ATTRIBUTE_SYSRAWRC = (uint32_t)SCMP_FLTATR_API_SYSRAWRC; const int C_CMP_NE = (int)SCMP_CMP_NE; const int C_CMP_LT = (int)SCMP_CMP_LT; @@ -147,8 +182,7 @@ unsigned int get_micro_version() #endif // The libseccomp API level functions were added in v2.4.0 -#if (SCMP_VER_MAJOR < 2) || \ - (SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 4) +#if SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 4 const unsigned int seccomp_api_get(void) { // libseccomp-golang requires libseccomp v2.2.0, at a minimum, which @@ -189,6 +223,50 @@ void add_struct_arg_cmp( return; } + +// The seccomp notify API functions were added in v2.5.0 +#if SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 5 + +struct seccomp_data { + int nr; + __u32 arch; + __u64 instruction_pointer; + __u64 args[6]; +}; + +struct seccomp_notif { + __u64 id; + __u32 pid; + __u32 flags; + struct seccomp_data data; +}; + +struct seccomp_notif_resp { + __u64 id; + __s64 val; + __s32 error; + __u32 flags; +}; + +int seccomp_notify_alloc(struct seccomp_notif **req, struct seccomp_notif_resp **resp) { + return -EOPNOTSUPP; +} +int seccomp_notify_fd(const scmp_filter_ctx ctx) { + return -EOPNOTSUPP; +} +void seccomp_notify_free(struct seccomp_notif *req, struct seccomp_notif_resp *resp) { +} +int seccomp_notify_id_valid(int fd, uint64_t id) { + return -EOPNOTSUPP; +} +int seccomp_notify_receive(int fd, struct seccomp_notif *req) { + return -EOPNOTSUPP; +} +int seccomp_notify_respond(int fd, struct seccomp_notif_resp *resp) { + return -EOPNOTSUPP; +} + +#endif */ import "C" @@ -199,10 +277,13 @@ type scmpFilterAttr uint32 const ( filterAttrActDefault scmpFilterAttr = iota - filterAttrActBadArch scmpFilterAttr = iota - filterAttrNNP scmpFilterAttr = iota - filterAttrTsync scmpFilterAttr = iota - filterAttrLog scmpFilterAttr = iota + filterAttrActBadArch + filterAttrNNP + filterAttrTsync + filterAttrLog + filterAttrSSB + filterAttrOptimize + filterAttrRawRC ) const ( @@ -210,9 +291,9 @@ 
const ( scmpError C.int = -1 // Comparison boundaries to check for architecture validity archStart ScmpArch = ArchNative - archEnd ScmpArch = ArchS390X + archEnd ScmpArch = ArchRISCV64 // Comparison boundaries to check for action validity - actionStart ScmpAction = ActKill + actionStart ScmpAction = ActKillThread actionEnd ScmpAction = ActKillProcess // Comparison boundaries to check for comparison operator validity compareOpStart ScmpCompareOp = CompareNotEqual @@ -220,8 +301,9 @@ const ( ) var ( - // Error thrown on bad filter context - errBadFilter = fmt.Errorf("filter is invalid or uninitialized") + // errBadFilter is thrown on bad filter context. + errBadFilter = errors.New("filter is invalid or uninitialized") + errDefAction = errors.New("requested action matches default action of filter") // Constants representing library major, minor, and micro versions verMajor = uint(C.get_major_version()) verMinor = uint(C.get_minor_version()) @@ -230,19 +312,28 @@ var ( // Nonexported functions -// Check if library version is greater than or equal to the given one -func checkVersionAbove(major, minor, micro uint) bool { - return (verMajor > major) || +// checkVersion returns an error if the libseccomp version being used +// is less than the one specified by major, minor, and micro arguments. +// Argument op is an arbitrary non-empty operation description, which +// is used as a part of the error message returned. +// +// Most users should use checkAPI instead. +func checkVersion(op string, major, minor, micro uint) error { + if (verMajor > major) || (verMajor == major && verMinor > minor) || - (verMajor == major && verMinor == minor && verMicro >= micro) + (verMajor == major && verMinor == minor && verMicro >= micro) { + return nil + } + return &VersionError{ + op: op, + major: major, + minor: minor, + micro: micro, + } } -// Ensure that the library is supported, i.e. >= 2.2.0. func ensureSupportedVersion() error { - if !checkVersionAbove(2, 2, 0) { - return VersionError{} - } - return nil + return checkVersion("seccomp", 2, 3, 1) } // Get the API level @@ -334,8 +425,10 @@ func (f *ScmpFilter) addRuleWrapper(call ScmpSyscall, action ScmpAction, exact b switch e := errRc(retCode); e { case syscall.EFAULT: return fmt.Errorf("unrecognized syscall %#x", int32(call)) - case syscall.EPERM: - return fmt.Errorf("requested action matches default action of filter") + // libseccomp >= v2.5.0 returns EACCES, older versions return EPERM. + // TODO: remove EPERM once libseccomp < v2.5.0 is not supported. 
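+		// Either errno means the requested action equals the filter's
+		// default action, so the rule would be a no-op.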
+ case syscall.EPERM, syscall.EACCES: + return errDefAction case syscall.EINVAL: return fmt.Errorf("two checks on same syscall argument") default: @@ -360,14 +453,6 @@ func (f *ScmpFilter) addRuleGeneric(call ScmpSyscall, action ScmpAction, exact b return err } } else { - // We don't support conditional filtering in library version v2.1 - if !checkVersionAbove(2, 2, 1) { - return VersionError{ - message: "conditional filtering is not supported", - minimum: "2.2.1", - } - } - argsArr := C.make_arg_cmp_array(C.uint(len(conds))) if argsArr == nil { return fmt.Errorf("error allocating memory for conditions") @@ -460,6 +545,12 @@ func archFromNative(a C.uint32_t) (ScmpArch, error) { return ArchS390, nil case C.C_ARCH_S390X: return ArchS390X, nil + case C.C_ARCH_PARISC: + return ArchPARISC, nil + case C.C_ARCH_PARISC64: + return ArchPARISC64, nil + case C.C_ARCH_RISCV64: + return ArchRISCV64, nil default: return 0x0, fmt.Errorf("unrecognized architecture %#x", uint32(a)) } @@ -500,6 +591,12 @@ func (a ScmpArch) toNative() C.uint32_t { return C.C_ARCH_S390 case ArchS390X: return C.C_ARCH_S390X + case ArchPARISC: + return C.C_ARCH_PARISC + case ArchPARISC64: + return C.C_ARCH_PARISC64 + case ArchRISCV64: + return C.C_ARCH_RISCV64 case ArchNative: return C.C_ARCH_NATIVE default: @@ -532,8 +629,6 @@ func (a ScmpCompareOp) toNative() C.int { func actionFromNative(a C.uint32_t) (ScmpAction, error) { aTmp := a & 0xFFFF switch a & 0xFFFF0000 { - case C.C_ACT_KILL: - return ActKill, nil case C.C_ACT_KILL_PROCESS: return ActKillProcess, nil case C.C_ACT_KILL_THREAD: @@ -548,6 +643,8 @@ func actionFromNative(a C.uint32_t) (ScmpAction, error) { return ActLog, nil case C.C_ACT_ALLOW: return ActAllow, nil + case C.C_ACT_NOTIFY: + return ActNotify, nil default: return 0x0, fmt.Errorf("unrecognized action %#x", uint32(a)) } @@ -556,8 +653,6 @@ func actionFromNative(a C.uint32_t) (ScmpAction, error) { // Only use with sanitized actions, no error handling func (a ScmpAction) toNative() C.uint32_t { switch a & 0xFFFF { - case ActKill: - return C.C_ACT_KILL case ActKillProcess: return C.C_ACT_KILL_PROCESS case ActKillThread: @@ -572,6 +667,8 @@ func (a ScmpAction) toNative() C.uint32_t { return C.C_ACT_LOG case ActAllow: return C.C_ACT_ALLOW + case ActNotify: + return C.C_ACT_NOTIFY default: return 0x0 } @@ -590,7 +687,197 @@ func (a scmpFilterAttr) toNative() uint32 { return uint32(C.C_ATTRIBUTE_TSYNC) case filterAttrLog: return uint32(C.C_ATTRIBUTE_LOG) + case filterAttrSSB: + return uint32(C.C_ATTRIBUTE_SSB) + case filterAttrOptimize: + return uint32(C.C_ATTRIBUTE_OPTIMIZE) + case filterAttrRawRC: + return uint32(C.C_ATTRIBUTE_SYSRAWRC) default: return 0x0 } } + +func syscallFromNative(a C.int) ScmpSyscall { + return ScmpSyscall(a) +} + +func notifReqFromNative(req *C.struct_seccomp_notif) (*ScmpNotifReq, error) { + scmpArgs := make([]uint64, 6) + for i := 0; i < len(scmpArgs); i++ { + scmpArgs[i] = uint64(req.data.args[i]) + } + + arch, err := archFromNative(req.data.arch) + if err != nil { + return nil, err + } + + scmpData := ScmpNotifData{ + Syscall: syscallFromNative(req.data.nr), + Arch: arch, + InstrPointer: uint64(req.data.instruction_pointer), + Args: scmpArgs, + } + + scmpReq := &ScmpNotifReq{ + ID: uint64(req.id), + Pid: uint32(req.pid), + Flags: uint32(req.flags), + Data: scmpData, + } + + return scmpReq, nil +} + +func (scmpResp *ScmpNotifResp) toNative(resp *C.struct_seccomp_notif_resp) { + resp.id = C.__u64(scmpResp.ID) + resp.val = C.__s64(scmpResp.Val) + resp.error = (C.__s32(scmpResp.Error) * 
-1) // kernel requires a negated value + resp.flags = C.__u32(scmpResp.Flags) +} + +// checkAPI checks that both the API level and the seccomp version are equal to +// or greater than the specified minLevel and major, minor, micro, +// respectively, and returns an error otherwise. Argument op is an arbitrary +// non-empty operation description, used as a part of the error message +// returned. +func checkAPI(op string, minLevel uint, major, minor, micro uint) error { + // Ignore error from getAPI, as it returns level == 0 in case of error. + level, _ := getAPI() + if level >= minLevel { + return checkVersion(op, major, minor, micro) + } + return &VersionError{ + op: op, + curAPI: level, + minAPI: minLevel, + major: major, + minor: minor, + micro: micro, + } +} + +// Userspace Notification API +// Calls to C.seccomp_notify* hidden from seccomp.go + +func notifSupported() error { + return checkAPI("seccomp notification", 6, 2, 5, 0) +} + +func (f *ScmpFilter) getNotifFd() (ScmpFd, error) { + f.lock.Lock() + defer f.lock.Unlock() + + if !f.valid { + return -1, errBadFilter + } + if err := notifSupported(); err != nil { + return -1, err + } + + fd := C.seccomp_notify_fd(f.filterCtx) + + return ScmpFd(fd), nil +} + +func notifReceive(fd ScmpFd) (*ScmpNotifReq, error) { + var req *C.struct_seccomp_notif + var resp *C.struct_seccomp_notif_resp + + if err := notifSupported(); err != nil { + return nil, err + } + + // we only use the request here; the response is unused + if retCode := C.seccomp_notify_alloc(&req, &resp); retCode != 0 { + return nil, errRc(retCode) + } + + defer func() { + C.seccomp_notify_free(req, resp) + }() + + for { + retCode, errno := C.seccomp_notify_receive(C.int(fd), req) + if retCode == 0 { + break + } + + if errno == syscall.EINTR { + continue + } + + if errno == syscall.ENOENT { + return nil, errno + } + + return nil, errRc(retCode) + } + + return notifReqFromNative(req) +} + +func notifRespond(fd ScmpFd, scmpResp *ScmpNotifResp) error { + var req *C.struct_seccomp_notif + var resp *C.struct_seccomp_notif_resp + + if err := notifSupported(); err != nil { + return err + } + + // we only use the response here; the request is discarded + if retCode := C.seccomp_notify_alloc(&req, &resp); retCode != 0 { + return errRc(retCode) + } + + defer func() { + C.seccomp_notify_free(req, resp) + }() + + scmpResp.toNative(resp) + + for { + retCode, errno := C.seccomp_notify_respond(C.int(fd), resp) + if retCode == 0 { + break + } + + if errno == syscall.EINTR { + continue + } + + if errno == syscall.ENOENT { + return errno + } + + return errRc(retCode) + } + + return nil +} + +func notifIDValid(fd ScmpFd, id uint64) error { + if err := notifSupported(); err != nil { + return err + } + + for { + retCode, errno := C.seccomp_notify_id_valid(C.int(fd), C.uint64_t(id)) + if retCode == 0 { + break + } + + if errno == syscall.EINTR { + continue + } + + if errno == syscall.ENOENT { + return errno + } + + return errRc(retCode) + } + + return nil +} diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md index 5152b6aa4..b042c896f 100644 --- a/vendor/github.com/sirupsen/logrus/README.md +++ b/vendor/github.com/sirupsen/logrus/README.md @@ -1,4 +1,4 @@ -# Logrus :walrus: [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus) +# Logrus :walrus: [![Build
Status](https://github.com/sirupsen/logrus/workflows/CI/badge.svg)](https://github.com/sirupsen/logrus/actions?query=workflow%3ACI) [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![Go Reference](https://pkg.go.dev/badge/github.com/sirupsen/logrus.svg)](https://pkg.go.dev/github.com/sirupsen/logrus) Logrus is a structured logger for Go (golang), completely API compatible with the standard library logger. @@ -341,7 +341,7 @@ import ( log "github.com/sirupsen/logrus" ) -init() { +func init() { // do something here to set environment depending on an environment variable // or command-line flag if Environment == "production" { diff --git a/vendor/github.com/sirupsen/logrus/buffer_pool.go b/vendor/github.com/sirupsen/logrus/buffer_pool.go index 4545dec07..c7787f77c 100644 --- a/vendor/github.com/sirupsen/logrus/buffer_pool.go +++ b/vendor/github.com/sirupsen/logrus/buffer_pool.go @@ -26,15 +26,6 @@ func (p *defaultPool) Get() *bytes.Buffer { return p.pool.Get().(*bytes.Buffer) } -func getBuffer() *bytes.Buffer { - return bufferPool.Get() -} - -func putBuffer(buf *bytes.Buffer) { - buf.Reset() - bufferPool.Put(buf) -} - // SetBufferPool allows replacing the default logrus buffer pool // to better meet the specific needs of an application. func SetBufferPool(bp BufferPool) { diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go index 07a1e5fa7..71cdbbc35 100644 --- a/vendor/github.com/sirupsen/logrus/entry.go +++ b/vendor/github.com/sirupsen/logrus/entry.go @@ -232,6 +232,7 @@ func (entry *Entry) log(level Level, msg string) { newEntry.Logger.mu.Lock() reportCaller := newEntry.Logger.ReportCaller + bufPool := newEntry.getBufferPool() newEntry.Logger.mu.Unlock() if reportCaller { @@ -239,11 +240,11 @@ func (entry *Entry) log(level Level, msg string) { } newEntry.fireHooks() - - buffer = getBuffer() + buffer = bufPool.Get() defer func() { newEntry.Buffer = nil - putBuffer(buffer) + buffer.Reset() + bufPool.Put(buffer) }() buffer.Reset() newEntry.Buffer = buffer @@ -260,6 +261,13 @@ func (entry *Entry) log(level Level, msg string) { } } +func (entry *Entry) getBufferPool() (pool BufferPool) { + if entry.Logger.BufferPool != nil { + return entry.Logger.BufferPool + } + return bufferPool +} + func (entry *Entry) fireHooks() { var tmpHooks LevelHooks entry.Logger.mu.Lock() @@ -276,18 +284,21 @@ func (entry *Entry) fireHooks() { } func (entry *Entry) write() { + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() serialized, err := entry.Logger.Formatter.Format(entry) if err != nil { fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) return } - entry.Logger.mu.Lock() - defer entry.Logger.mu.Unlock() if _, err := entry.Logger.Out.Write(serialized); err != nil { fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) } } +// Log logs a message at the given level. +// Warning: calling Log with PanicLevel or FatalLevel logs the message but does +// not panic or exit, respectively. For that behaviour, use Entry.Panic or +// Entry.Fatal instead.
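+//
+// A minimal usage sketch (field name and messages invented for illustration):
+//
+//	entry := logrus.WithField("component", "db")
+//	entry.Log(logrus.InfoLevel, "connected")  // same as entry.Info("connected")
+//	entry.Log(logrus.FatalLevel, "corrupted") // logged, but the process keeps running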
func (entry *Entry) Log(level Level, args ...interface{}) { if entry.Logger.IsLevelEnabled(level) { entry.log(level, fmt.Sprint(args...)) diff --git a/vendor/github.com/sirupsen/logrus/go.mod b/vendor/github.com/sirupsen/logrus/go.mod index b3919d5ea..8b3f6d373 100644 --- a/vendor/github.com/sirupsen/logrus/go.mod +++ b/vendor/github.com/sirupsen/logrus/go.mod @@ -2,9 +2,8 @@ module github.com/sirupsen/logrus require ( github.com/davecgh/go-spew v1.1.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/stretchr/testify v1.2.2 - golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 + github.com/stretchr/testify v1.7.0 + golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 ) go 1.13 diff --git a/vendor/github.com/sirupsen/logrus/go.sum b/vendor/github.com/sirupsen/logrus/go.sum index 694c18b84..e5fdc85bf 100644 --- a/vendor/github.com/sirupsen/logrus/go.sum +++ b/vendor/github.com/sirupsen/logrus/go.sum @@ -1,8 +1,14 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 h1:0A+M6Uqn+Eje4kHMK80dtF3JCXC4ykBgQG4Fe06QRhQ= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go index 337704457..5ff0aef6d 100644 --- a/vendor/github.com/sirupsen/logrus/logger.go +++ b/vendor/github.com/sirupsen/logrus/logger.go @@ -44,6 +44,9 @@ type Logger struct { entryPool sync.Pool // Function to exit the application, defaults to `os.Exit()` ExitFunc exitFunc + // The buffer pool used to format the log. If it is nil, the default global + // buffer pool will be used. + BufferPool BufferPool } type exitFunc func(int) @@ -192,6 +195,9 @@ func (logger *Logger) Panicf(format string, args ...interface{}) { logger.Logf(PanicLevel, format, args...) } +// Log logs a message at the given level. +// Warning: calling Log with PanicLevel or FatalLevel logs the message but does +// not panic or exit, respectively. For that behaviour, use Logger.Panic or +// Logger.Fatal instead.
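+//
+// A sketch of the distinction (assuming a freshly constructed logger):
+//
+//	logger := logrus.New()
+//	logger.Log(logrus.FatalLevel, "boom") // writes the entry; ExitFunc is not called
+//	logger.Fatal("boom")                  // writes the entry, then calls ExitFunc(1)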
func (logger *Logger) Log(level Level, args ...interface{}) { if logger.IsLevelEnabled(level) { entry := logger.newEntry() @@ -402,3 +408,10 @@ func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks { logger.mu.Unlock() return oldHooks } + +// SetBufferPool sets the logger buffer pool. +func (logger *Logger) SetBufferPool(pool BufferPool) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.BufferPool = pool +} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go index 41649d267..95d8e59da 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -1,8 +1,10 @@ package assert import ( + "bytes" "fmt" "reflect" + "time" ) type CompareType int @@ -30,6 +32,9 @@ var ( float64Type = reflect.TypeOf(float64(1)) stringType = reflect.TypeOf("") + + timeType = reflect.TypeOf(time.Time{}) + bytesType = reflect.TypeOf([]byte{}) ) func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { @@ -299,6 +304,47 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { return compareLess, true } } + // Check for known struct types we can check for compare results. + case reflect.Struct: + { + // All structs enter here. We're not interested in most types. + if !canConvert(obj1Value, timeType) { + break + } + + // time.Time can be compared! + timeObj1, ok := obj1.(time.Time) + if !ok { + timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time) + } + + timeObj2, ok := obj2.(time.Time) + if !ok { + timeObj2 = obj2Value.Convert(timeType).Interface().(time.Time) + } + + return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64) + } + case reflect.Slice: + { + // We only care about the []byte type. + if !canConvert(obj1Value, bytesType) { + break + } + + // []byte can be compared! + bytesObj1, ok := obj1.([]byte) + if !ok { + bytesObj1 = obj1Value.Convert(bytesType).Interface().([]byte) + + } + bytesObj2, ok := obj2.([]byte) + if !ok { + bytesObj2 = obj2Value.Convert(bytesType).Interface().([]byte) + } + + return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true + } } return compareEqual, false @@ -310,7 +356,10 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { // assert.Greater(t, float64(2), float64(1)) // assert.Greater(t, "b", "a") func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) } // GreaterOrEqual asserts that the first element is greater than or equal to the second @@ -320,7 +369,10 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface // assert.GreaterOrEqual(t, "b", "a") // assert.GreaterOrEqual(t, "b", "b") func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
} // Less asserts that the first element is less than the second @@ -329,7 +381,10 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in // assert.Less(t, float64(1), float64(2)) // assert.Less(t, "a", "b") func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) } // LessOrEqual asserts that the first element is less than or equal to the second @@ -339,7 +394,10 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) // assert.LessOrEqual(t, "a", "b") // assert.LessOrEqual(t, "b", "b") func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) } // Positive asserts that the specified element is positive @@ -347,8 +405,11 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter // assert.Positive(t, 1) // assert.Positive(t, 1.23) func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs) + return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs...) } // Negative asserts that the specified element is negative @@ -356,8 +417,11 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { // assert.Negative(t, -1) // assert.Negative(t, -1.23) func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs) + return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs...) } func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go new file mode 100644 index 000000000..da867903e --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go @@ -0,0 +1,16 @@ +//go:build go1.17 +// +build go1.17 + +// TODO: once support for Go 1.16 is dropped, this file can be +// merged/removed with assertion_compare_go1.17_test.go and +// assertion_compare_legacy.go + +package assert + +import "reflect" + +// Wrapper around reflect.Value.CanConvert, for compatibility +// reasons. 
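+//
+// For example (types chosen for illustration): a struct such as sql.NullTime
+// is not convertible to time.Time, so canConvert reports false for it, while
+// any named type whose underlying type is time.Time reports true.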
+func canConvert(value reflect.Value, to reflect.Type) bool { + return value.CanConvert(to) +} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go new file mode 100644 index 000000000..1701af2a3 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go @@ -0,0 +1,16 @@ +//go:build !go1.17 +// +build !go1.17 + +// TODO: once support for Go 1.16 is dropped, this file can be +// merged/removed with assertion_compare_go1.17_test.go and +// assertion_compare_can_convert.go + +package assert + +import "reflect" + +// Older versions of Go do not have the reflect.Value.CanConvert +// method. +func canConvert(value reflect.Value, to reflect.Type) bool { + return false +} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go index 4dfd1229a..7880b8f94 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -123,6 +123,18 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int return ErrorAs(t, err, target, append([]interface{}{msg}, args...)...) } +// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") +func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return ErrorContains(t, theError, contains, append([]interface{}{msg}, args...)...) +} + // ErrorIsf asserts that at least one of the errors in err's chain matches target. // This is a wrapper for errors.Is. func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool { @@ -724,6 +736,16 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...) } +// WithinRangef asserts that a time is within a time range (inclusive). +// +// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return WithinRange(t, actual, start, end, append([]interface{}{msg}, args...)...) +} + // YAMLEqf asserts that two YAML strings are equivalent. func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index 25337a6f0..339515b8b 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -222,6 +222,30 @@ func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args .. return ErrorAsf(a.t, err, target, msg, args...) } +// ErrorContains asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring.
+// +// actualObj, err := SomeFunction() +// a.ErrorContains(err, expectedErrorSubString) +func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ErrorContains(a.t, theError, contains, msgAndArgs...) +} + +// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted") +func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ErrorContainsf(a.t, theError, contains, msg, args...) +} + // ErrorIs asserts that at least one of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) bool { @@ -1437,6 +1461,26 @@ func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta return WithinDurationf(a.t, expected, actual, delta, msg, args...) } +// WithinRange asserts that a time is within a time range (inclusive). +// +// a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +func (a *Assertions) WithinRange(actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return WithinRange(a.t, actual, start, end, msgAndArgs...) +} + +// WithinRangef asserts that a time is within a time range (inclusive). +// +// a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +func (a *Assertions) WithinRangef(actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return WithinRangef(a.t, actual, start, end, msg, args...) +} + // YAMLEq asserts that two YAML strings are equivalent. func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go index 1c3b47182..759448783 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_order.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go @@ -50,7 +50,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareT // assert.IsIncreasing(t, []float{1, 2}) // assert.IsIncreasing(t, []string{"a", "b"}) func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs) + return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) 
} // IsNonIncreasing asserts that the collection is not increasing @@ -59,7 +59,7 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo // assert.IsNonIncreasing(t, []float{2, 1}) // assert.IsNonIncreasing(t, []string{"b", "a"}) func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs) + return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) } // IsDecreasing asserts that the collection is decreasing @@ -68,7 +68,7 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) // assert.IsDecreasing(t, []float{2, 1}) // assert.IsDecreasing(t, []string{"b", "a"}) func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs) + return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) } // IsNonDecreasing asserts that the collection is not decreasing @@ -77,5 +77,5 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo // assert.IsNonDecreasing(t, []float{1, 2}) // assert.IsNonDecreasing(t, []string{"a", "b"}) func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs) + return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) } diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index bcac4401f..fa1245b18 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -8,6 +8,7 @@ import ( "fmt" "math" "os" + "path/filepath" "reflect" "regexp" "runtime" @@ -144,7 +145,8 @@ func CallerInfo() []string { if len(parts) > 1 { dir := parts[len(parts)-2] if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" { - callers = append(callers, fmt.Sprintf("%s:%d", file, line)) + path, _ := filepath.Abs(file) + callers = append(callers, fmt.Sprintf("%s:%d", path, line)) } } @@ -563,16 +565,17 @@ func isEmpty(object interface{}) bool { switch objValue.Kind() { // collection types are empty when they have no element - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: + case reflect.Chan, reflect.Map, reflect.Slice: return objValue.Len() == 0 - // pointers are empty if nil or if the value they point to is empty + // pointers are empty if nil or if the value they point to is empty case reflect.Ptr: if objValue.IsNil() { return true } deref := objValue.Elem().Interface() return isEmpty(deref) - // for all other types, compare against the zero value + // for all other types, compare against the zero value + // array types are empty when they match their zero-initialized state default: zero := reflect.Zero(objValue.Type()) return reflect.DeepEqual(object, zero.Interface()) @@ -718,10 +721,14 @@ func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...inte // return (false, false) if impossible. // return (true, false) if element was not found. 
// return (true, true) if element was found. -func includeElement(list interface{}, element interface{}) (ok, found bool) { +func containsElement(list interface{}, element interface{}) (ok, found bool) { listValue := reflect.ValueOf(list) - listKind := reflect.TypeOf(list).Kind() + listType := reflect.TypeOf(list) + if listType == nil { + return false, false + } + listKind := listType.Kind() defer func() { if e := recover(); e != nil { ok = false @@ -764,7 +771,7 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo h.Helper() } - ok, found := includeElement(s, contains) + ok, found := containsElement(s, contains) if !ok { return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...) } @@ -787,7 +794,7 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) h.Helper() } - ok, found := includeElement(s, contains) + ok, found := containsElement(s, contains) if !ok { return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) } @@ -811,7 +818,6 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok return true // we consider nil to be equal to the nil set } - subsetValue := reflect.ValueOf(subset) defer func() { if e := recover(); e != nil { ok = false @@ -821,17 +827,35 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok listKind := reflect.TypeOf(list).Kind() subsetKind := reflect.TypeOf(subset).Kind() - if listKind != reflect.Array && listKind != reflect.Slice { + if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) } - if subsetKind != reflect.Array && subsetKind != reflect.Slice { + if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } + subsetValue := reflect.ValueOf(subset) + if subsetKind == reflect.Map && listKind == reflect.Map { + listValue := reflect.ValueOf(list) + subsetKeys := subsetValue.MapKeys() + + for i := 0; i < len(subsetKeys); i++ { + subsetKey := subsetKeys[i] + subsetElement := subsetValue.MapIndex(subsetKey).Interface() + listElement := listValue.MapIndex(subsetKey).Interface() + + if !ObjectsAreEqual(subsetElement, listElement) { + return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, subsetElement), msgAndArgs...) + } + } + + return true + } + for i := 0; i < subsetValue.Len(); i++ { element := subsetValue.Index(i).Interface() - ok, found := includeElement(list, element) + ok, found := containsElement(list, element) if !ok { return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) } @@ -852,10 +876,9 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) h.Helper() } if subset == nil { - return Fail(t, fmt.Sprintf("nil is the empty set which is a subset of every set"), msgAndArgs...) + return Fail(t, "nil is the empty set which is a subset of every set", msgAndArgs...)
} - subsetValue := reflect.ValueOf(subset) defer func() { if e := recover(); e != nil { ok = false @@ -865,17 +888,35 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) listKind := reflect.TypeOf(list).Kind() subsetKind := reflect.TypeOf(subset).Kind() - if listKind != reflect.Array && listKind != reflect.Slice { + if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) } - if subsetKind != reflect.Array && subsetKind != reflect.Slice { + if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } + subsetValue := reflect.ValueOf(subset) + if subsetKind == reflect.Map && listKind == reflect.Map { + listValue := reflect.ValueOf(list) + subsetKeys := subsetValue.MapKeys() + + for i := 0; i < len(subsetKeys); i++ { + subsetKey := subsetKeys[i] + subsetElement := subsetValue.MapIndex(subsetKey).Interface() + listElement := listValue.MapIndex(subsetKey).Interface() + + if !ObjectsAreEqual(subsetElement, listElement) { + return true + } + } + + return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...) + } + for i := 0; i < subsetValue.Len(); i++ { element := subsetValue.Index(i).Interface() - ok, found := includeElement(list, element) + ok, found := containsElement(list, element) if !ok { return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) } @@ -1000,27 +1041,21 @@ func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { type PanicTestFunc func() // didPanic returns true if the function passed to it panics. Otherwise, it returns false. -func didPanic(f PanicTestFunc) (bool, interface{}, string) { - - didPanic := false - var message interface{} - var stack string - func() { - - defer func() { - if message = recover(); message != nil { - didPanic = true - stack = string(debug.Stack()) - } - }() - - // call the target function - f() +func didPanic(f PanicTestFunc) (didPanic bool, message interface{}, stack string) { + didPanic = true + defer func() { + message = recover() + if didPanic { + stack = string(debug.Stack()) + } }() - return didPanic, message, stack + // call the target function + f() + didPanic = false + return } // Panics asserts that the code inside the specified PanicTestFunc panics. @@ -1111,6 +1146,27 @@ func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, return true } +// WithinRange asserts that a time is within a time range (inclusive). +// +// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +func WithinRange(t TestingT, actual, start, end time.Time, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + if end.Before(start) { + return Fail(t, "Start should be before end", msgAndArgs...) + } + + if actual.Before(start) { + return Fail(t, fmt.Sprintf("Time %v expected to be in time range %v to %v, but is before the range", actual, start, end), msgAndArgs...) + } else if actual.After(end) { + return Fail(t, fmt.Sprintf("Time %v expected to be in time range %v to %v, but is after the range", actual, start, end), msgAndArgs...)
+ } + + return true +} + func toFloat(x interface{}) (float64, bool) { var xf float64 xok := true @@ -1161,11 +1217,15 @@ func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs bf, bok := toFloat(actual) if !aok || !bok { - return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...) + return Fail(t, "Parameters must be numerical", msgAndArgs...) + } + + if math.IsNaN(af) && math.IsNaN(bf) { + return true } if math.IsNaN(af) { - return Fail(t, fmt.Sprintf("Expected must not be NaN"), msgAndArgs...) + return Fail(t, "Expected must not be NaN", msgAndArgs...) } if math.IsNaN(bf) { @@ -1188,7 +1248,7 @@ func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAn if expected == nil || actual == nil || reflect.TypeOf(actual).Kind() != reflect.Slice || reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) + return Fail(t, "Parameters must be slice", msgAndArgs...) } actualSlice := reflect.ValueOf(actual) @@ -1250,8 +1310,12 @@ func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, m func calcRelativeError(expected, actual interface{}) (float64, error) { af, aok := toFloat(expected) - if !aok { - return 0, fmt.Errorf("expected value %q cannot be converted to float", expected) + bf, bok := toFloat(actual) + if !aok || !bok { + return 0, fmt.Errorf("Parameters must be numerical") + } + if math.IsNaN(af) && math.IsNaN(bf) { + return 0, nil } if math.IsNaN(af) { return 0, errors.New("expected value must not be NaN") @@ -1259,10 +1323,6 @@ func calcRelativeError(expected, actual interface{}) (float64, error) { if af == 0 { return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error") } - bf, bok := toFloat(actual) - if !bok { - return 0, fmt.Errorf("actual value %q cannot be converted to float", actual) - } if math.IsNaN(bf) { return 0, errors.New("actual value must not be NaN") } @@ -1298,7 +1358,7 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m if expected == nil || actual == nil || reflect.TypeOf(actual).Kind() != reflect.Slice || reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) + return Fail(t, "Parameters must be slice", msgAndArgs...) } actualSlice := reflect.ValueOf(actual) @@ -1375,6 +1435,27 @@ func EqualError(t TestingT, theError error, errString string, msgAndArgs ...inte return true } +// ErrorContains asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// assert.ErrorContains(t, err, expectedErrorSubString) +func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if !Error(t, theError, msgAndArgs...) { + return false + } + + actual := theError.Error() + if !strings.Contains(actual, contains) { + return Fail(t, fmt.Sprintf("Error %#v does not contain %#v", actual, contains), msgAndArgs...) + } + + return true +} + // matchRegexp returns true if a specified regexp matches a string.
func matchRegexp(rx interface{}, str interface{}) bool { @@ -1588,12 +1669,17 @@ func diff(expected interface{}, actual interface{}) string { } var e, a string - if et != reflect.TypeOf("") { - e = spewConfig.Sdump(expected) - a = spewConfig.Sdump(actual) - } else { + + switch et { + case reflect.TypeOf(""): e = reflect.ValueOf(expected).String() a = reflect.ValueOf(actual).String() + case reflect.TypeOf(time.Time{}): + e = spewConfigStringerEnabled.Sdump(expected) + a = spewConfigStringerEnabled.Sdump(actual) + default: + e = spewConfig.Sdump(expected) + a = spewConfig.Sdump(actual) } diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ @@ -1625,6 +1711,14 @@ var spewConfig = spew.ConfigState{ MaxDepth: 10, } +var spewConfigStringerEnabled = spew.ConfigState{ + Indent: " ", + DisablePointerAddresses: true, + DisableCapacities: true, + SortKeys: true, + MaxDepth: 10, +} + type tHelper interface { Helper() } diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go index 51820df2e..880853f5a 100644 --- a/vendor/github.com/stretchr/testify/require/require.go +++ b/vendor/github.com/stretchr/testify/require/require.go @@ -280,6 +280,36 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int t.FailNow() } +// ErrorContains asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// assert.ErrorContains(t, err, expectedErrorSubString) +func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.ErrorContains(t, theError, contains, msgAndArgs...) { + return + } + t.FailNow() +} + +// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") +func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.ErrorContainsf(t, theError, contains, msg, args...) { + return + } + t.FailNow() +} + // ErrorIs asserts that at least one of the errors in err's chain matches target. // This is a wrapper for errors.Is. func ErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) { @@ -1834,6 +1864,32 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim t.FailNow() } +// WithinRange asserts that a time is within a time range (inclusive). +// +// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +func WithinRange(t TestingT, actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.WithinRange(t, actual, start, end, msgAndArgs...) { + return + } + t.FailNow() +} + +// WithinRangef asserts that a time is within a time range (inclusive). +// +// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.WithinRangef(t, actual, start, end, msg, args...) 
{ + return + } + t.FailNow() +} + // YAMLEq asserts that two YAML strings are equivalent. func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go index ed54a9d83..960bf6f2c 100644 --- a/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/vendor/github.com/stretchr/testify/require/require_forward.go @@ -223,6 +223,30 @@ func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args .. ErrorAsf(a.t, err, target, msg, args...) } +// ErrorContains asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// a.ErrorContains(err, expectedErrorSubString) +func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + ErrorContains(a.t, theError, contains, msgAndArgs...) +} + +// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted") +func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + ErrorContainsf(a.t, theError, contains, msg, args...) +} + // ErrorIs asserts that at least one of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) { @@ -1438,6 +1462,26 @@ func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta WithinDurationf(a.t, expected, actual, delta, msg, args...) } +// WithinRange asserts that a time is within a time range (inclusive). +// +// a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +func (a *Assertions) WithinRange(actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + WithinRange(a.t, actual, start, end, msgAndArgs...) +} + +// WithinRangef asserts that a time is within a time range (inclusive). +// +// a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +func (a *Assertions) WithinRangef(actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + WithinRangef(a.t, actual, start, end, msg, args...) +} + // YAMLEq asserts that two YAML strings are equivalent. 
func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) {
 	if h, ok := a.t.(tHelper); ok {
diff --git a/vendor/github.com/ulikunitz/xz/.gitignore b/vendor/github.com/ulikunitz/xz/.gitignore
index e3c2fc2f1..eb3d5f517 100644
--- a/vendor/github.com/ulikunitz/xz/.gitignore
+++ b/vendor/github.com/ulikunitz/xz/.gitignore
@@ -23,3 +23,6 @@ cmd/xb/xb
 
 # default compression test file
 enwik8*
+
+# file generated by example
+example.xz
\ No newline at end of file
diff --git a/vendor/github.com/ulikunitz/xz/LICENSE b/vendor/github.com/ulikunitz/xz/LICENSE
index 009b84870..8a7f0877d 100644
--- a/vendor/github.com/ulikunitz/xz/LICENSE
+++ b/vendor/github.com/ulikunitz/xz/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2014-2021 Ulrich Kunitz
+Copyright (c) 2014-2022 Ulrich Kunitz
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
diff --git a/vendor/github.com/ulikunitz/xz/README.md b/vendor/github.com/ulikunitz/xz/README.md
index 0a2dc8284..554718521 100644
--- a/vendor/github.com/ulikunitz/xz/README.md
+++ b/vendor/github.com/ulikunitz/xz/README.md
@@ -53,6 +53,10 @@ func main() {
 }
 ```
 
+## Documentation
+
+You can find the full documentation at [pkg.go.dev](https://pkg.go.dev/github.com/ulikunitz/xz).
+
 ## Using the gxz compression tool
 
 The package includes a gxz command line utility for compression and
diff --git a/vendor/github.com/ulikunitz/xz/TODO.md b/vendor/github.com/ulikunitz/xz/TODO.md
index 594e0c7fe..a3d6f1925 100644
--- a/vendor/github.com/ulikunitz/xz/TODO.md
+++ b/vendor/github.com/ulikunitz/xz/TODO.md
@@ -86,11 +86,20 @@
 
 ## Log
 
+### 2022-12-12
+
+Matt Dantay (@bodgit) reported an issue with the LZMA reader. The implementation
+returned an error if the dictionary size was less than 4096 bytes, but the
+recommendation states that the actual window size used should be 4096 bytes in
+that case. The fix was actually proposed in pull request
+[#52](https://github.com/ulikunitz/xz/pull/52). The new patch v0.5.11 includes
+it.
+
 ### 2021-02-02
 
 Mituo Heijo has fuzzed xz and found a bug in the function readIndexBody. The
 function allocated a slice of records immediately after reading the value
 without further checks. Since the number has been too large the make function
 did panic. The fix is to check the number against the expected number of
 records before allocating the records.
diff --git a/vendor/github.com/ulikunitz/xz/bits.go b/vendor/github.com/ulikunitz/xz/bits.go
index e48450c2c..b30f1ec97 100644
--- a/vendor/github.com/ulikunitz/xz/bits.go
+++ b/vendor/github.com/ulikunitz/xz/bits.go
@@ -1,4 +1,4 @@
-// Copyright 2014-2021 Ulrich Kunitz. All rights reserved.
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
diff --git a/vendor/github.com/ulikunitz/xz/crc.go b/vendor/github.com/ulikunitz/xz/crc.go
index a5c57fb61..13a58cb30 100644
--- a/vendor/github.com/ulikunitz/xz/crc.go
+++ b/vendor/github.com/ulikunitz/xz/crc.go
@@ -1,4 +1,4 @@
-// Copyright 2014-2021 Ulrich Kunitz. All rights reserved.
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
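The testify hunks earlier in this patch add `ErrorContains`, `WithinRange`, and map support to `Subset`/`NotSubset`. A minimal sketch of how a consumer's test could exercise the new assertions; this is a hypothetical test file, not part of the vendored code, but the signatures match the diff above:

```go
package example_test

import (
	"errors"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestNewTestifyAssertions(t *testing.T) {
	// ErrorContains: the error must be non-nil and its message must
	// contain the given substring.
	err := errors.New("lstat /var/lib/containers: permission denied")
	assert.ErrorContains(t, err, "permission denied")

	// WithinRange: the actual time must fall inside [start, end].
	now := time.Now()
	assert.WithinRange(t, now, now.Add(-time.Second), now.Add(time.Second))

	// Subset now also accepts maps: every key/value pair of the second
	// map must appear, with an equal value, in the first.
	assert.Subset(t,
		map[string]string{"driver": "overlay", "runroot": "/run"},
		map[string]string{"driver": "overlay"},
	)
}
```

The `assert` variants return `bool`; the `require` wrappers added in the same hunks call `t.FailNow()` on failure instead.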
diff --git a/vendor/github.com/ulikunitz/xz/format.go b/vendor/github.com/ulikunitz/xz/format.go index c98c12dfd..597fca2dc 100644 --- a/vendor/github.com/ulikunitz/xz/format.go +++ b/vendor/github.com/ulikunitz/xz/format.go @@ -1,4 +1,4 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go b/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go index f723cf252..dae159db5 100644 --- a/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go +++ b/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go @@ -1,4 +1,4 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/doc.go b/vendor/github.com/ulikunitz/xz/internal/hash/doc.go index cc60a6b5c..b4cf8b75e 100644 --- a/vendor/github.com/ulikunitz/xz/internal/hash/doc.go +++ b/vendor/github.com/ulikunitz/xz/internal/hash/doc.go @@ -1,4 +1,4 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go b/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go index c6432913f..5322342ee 100644 --- a/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go +++ b/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go @@ -1,4 +1,4 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/roller.go b/vendor/github.com/ulikunitz/xz/internal/hash/roller.go index f1de88b44..a98983356 100644 --- a/vendor/github.com/ulikunitz/xz/internal/hash/roller.go +++ b/vendor/github.com/ulikunitz/xz/internal/hash/roller.go @@ -1,4 +1,4 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go b/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go index 6c20c77ba..f4627ea11 100644 --- a/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go +++ b/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go @@ -1,4 +1,4 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -31,8 +31,7 @@ import ( // printed. There is no control over the order of the items printed and // the format. 
The full format is: // -// 2009-01-23 01:23:23.123123 /a/b/c/d.go:23: message -// +// 2009-01-23 01:23:23.123123 /a/b/c/d.go:23: message const ( Ldate = 1 << iota // the date: 2009-01-23 Ltime // the time: 01:23:23 diff --git a/vendor/github.com/ulikunitz/xz/lzma/bintree.go b/vendor/github.com/ulikunitz/xz/lzma/bintree.go index 2a7bd19ec..2b39da6f7 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/bintree.go +++ b/vendor/github.com/ulikunitz/xz/lzma/bintree.go @@ -1,4 +1,4 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/bitops.go b/vendor/github.com/ulikunitz/xz/lzma/bitops.go index d2c07e8c9..201091709 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/bitops.go +++ b/vendor/github.com/ulikunitz/xz/lzma/bitops.go @@ -1,4 +1,4 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/breader.go b/vendor/github.com/ulikunitz/xz/lzma/breader.go index 939be8845..9dfdf28b2 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/breader.go +++ b/vendor/github.com/ulikunitz/xz/lzma/breader.go @@ -1,4 +1,4 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/buffer.go b/vendor/github.com/ulikunitz/xz/lzma/buffer.go index 2761de5f0..af41d5b2d 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/buffer.go +++ b/vendor/github.com/ulikunitz/xz/lzma/buffer.go @@ -1,4 +1,4 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go b/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go index 040874c1a..f27e31a4a 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go +++ b/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go @@ -1,4 +1,4 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoder.go b/vendor/github.com/ulikunitz/xz/lzma/decoder.go index cbb943a06..3765484e6 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/decoder.go +++ b/vendor/github.com/ulikunitz/xz/lzma/decoder.go @@ -1,4 +1,4 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go index 8cd616ef9..d5b814f0a 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go +++ b/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go @@ -1,4 +1,4 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. 
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/directcodec.go b/vendor/github.com/ulikunitz/xz/lzma/directcodec.go index 20b256a9d..76b713106 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/directcodec.go +++ b/vendor/github.com/ulikunitz/xz/lzma/directcodec.go @@ -1,4 +1,4 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/distcodec.go b/vendor/github.com/ulikunitz/xz/lzma/distcodec.go index 60ed9aef1..b447d8ec4 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/distcodec.go +++ b/vendor/github.com/ulikunitz/xz/lzma/distcodec.go @@ -1,4 +1,4 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/encoder.go b/vendor/github.com/ulikunitz/xz/lzma/encoder.go index 5ed057a71..e40938318 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/encoder.go +++ b/vendor/github.com/ulikunitz/xz/lzma/encoder.go @@ -1,4 +1,4 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go index 056f89757..4b3916eab 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go +++ b/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go @@ -1,4 +1,4 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/hashtable.go b/vendor/github.com/ulikunitz/xz/lzma/hashtable.go index 0fb7910bc..f66e9cdd9 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/hashtable.go +++ b/vendor/github.com/ulikunitz/xz/lzma/hashtable.go @@ -1,4 +1,4 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/header.go b/vendor/github.com/ulikunitz/xz/lzma/header.go index 04276c816..1ae7d80ca 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/header.go +++ b/vendor/github.com/ulikunitz/xz/lzma/header.go @@ -1,4 +1,4 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/header2.go b/vendor/github.com/ulikunitz/xz/lzma/header2.go index be54dd85f..081fc840b 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/header2.go +++ b/vendor/github.com/ulikunitz/xz/lzma/header2.go @@ -1,4 +1,4 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. 
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go b/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go index 6e0edfc8c..1ea5320a0 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go +++ b/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go @@ -1,4 +1,4 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -59,8 +59,7 @@ func (lc *lengthCodec) init() { // Encode encodes the length offset. The length offset l can be compute by // subtracting minMatchLen (2) from the actual length. // -// l = length - minMatchLen -// +// l = length - minMatchLen func (lc *lengthCodec) Encode(e *rangeEncoder, l uint32, posState uint32, ) (err error) { if l > maxMatchLen-minMatchLen { diff --git a/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go b/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go index 0bfc763ce..e4ef5fc59 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go +++ b/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go @@ -1,4 +1,4 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go b/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go index 96ebda0fd..02dfb8bf5 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go +++ b/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go @@ -1,4 +1,4 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/operation.go b/vendor/github.com/ulikunitz/xz/lzma/operation.go index 026ce48af..7b7eddc3d 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/operation.go +++ b/vendor/github.com/ulikunitz/xz/lzma/operation.go @@ -1,4 +1,4 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/prob.go b/vendor/github.com/ulikunitz/xz/lzma/prob.go index 9a2648e0f..2feccba11 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/prob.go +++ b/vendor/github.com/ulikunitz/xz/lzma/prob.go @@ -1,4 +1,4 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/properties.go b/vendor/github.com/ulikunitz/xz/lzma/properties.go index f229fc9fe..15b754ccb 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/properties.go +++ b/vendor/github.com/ulikunitz/xz/lzma/properties.go @@ -1,4 +1,4 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
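The `reader.go` hunk a few files below changes how `ReaderConfig.NewReader` treats an LZMA header whose dictionary capacity is below `MinDictCap`: instead of rejecting the stream, it now clamps the capacity, which is the v0.5.11 behavior change described in the 2022-12-12 TODO.md entry above. A rough before/after sketch with hypothetical names; the vendored code applies this directly to the parsed header field:

```go
package lzmasketch

import "errors"

// Before v0.5.11: an undersized dictionary capacity was a hard error.
func dictCapOld(headerDictCap, minDictCap int) (int, error) {
	if headerDictCap < minDictCap {
		return 0, errors.New("lzma: dictionary capacity too small")
	}
	return headerDictCap, nil
}

// After v0.5.11: the reader silently uses minDictCap (4096 bytes, per
// the TODO.md entry) instead of failing, matching the recommendation
// for such streams.
func dictCapNew(headerDictCap, minDictCap int) int {
	if headerDictCap < minDictCap {
		return minDictCap
	}
	return headerDictCap
}
```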
diff --git a/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go b/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go index 57f1ab904..4b0fee3ff 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go +++ b/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go @@ -1,4 +1,4 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/reader.go b/vendor/github.com/ulikunitz/xz/lzma/reader.go index 2ed13c886..ae911c389 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/reader.go +++ b/vendor/github.com/ulikunitz/xz/lzma/reader.go @@ -1,4 +1,4 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -70,7 +70,7 @@ func (c ReaderConfig) NewReader(lzma io.Reader) (r *Reader, err error) { return nil, err } if r.h.dictCap < MinDictCap { - return nil, errors.New("lzma: dictionary capacity too small") + r.h.dictCap = MinDictCap } dictCap := r.h.dictCap if c.DictCap > dictCap { diff --git a/vendor/github.com/ulikunitz/xz/lzma/reader2.go b/vendor/github.com/ulikunitz/xz/lzma/reader2.go index de3da37ee..f36e26505 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/reader2.go +++ b/vendor/github.com/ulikunitz/xz/lzma/reader2.go @@ -1,4 +1,4 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/state.go b/vendor/github.com/ulikunitz/xz/lzma/state.go index 09d62f7d9..34779c513 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/state.go +++ b/vendor/github.com/ulikunitz/xz/lzma/state.go @@ -1,4 +1,4 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go b/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go index 6e927e935..36b29b598 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go +++ b/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go @@ -1,4 +1,4 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/writer.go b/vendor/github.com/ulikunitz/xz/lzma/writer.go index d0d220fe1..e8f89811d 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/writer.go +++ b/vendor/github.com/ulikunitz/xz/lzma/writer.go @@ -1,4 +1,4 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/writer2.go b/vendor/github.com/ulikunitz/xz/lzma/writer2.go index dfaaec95b..97bbafa11 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/writer2.go +++ b/vendor/github.com/ulikunitz/xz/lzma/writer2.go @@ -1,4 +1,4 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. 
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzmafilter.go b/vendor/github.com/ulikunitz/xz/lzmafilter.go index 4f1bb3393..bd5f42ee8 100644 --- a/vendor/github.com/ulikunitz/xz/lzmafilter.go +++ b/vendor/github.com/ulikunitz/xz/lzmafilter.go @@ -1,4 +1,4 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/none-check.go b/vendor/github.com/ulikunitz/xz/none-check.go index 95240135d..6a56a2612 100644 --- a/vendor/github.com/ulikunitz/xz/none-check.go +++ b/vendor/github.com/ulikunitz/xz/none-check.go @@ -1,4 +1,4 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/reader.go b/vendor/github.com/ulikunitz/xz/reader.go index 7f974ffc5..bde1412cf 100644 --- a/vendor/github.com/ulikunitz/xz/reader.go +++ b/vendor/github.com/ulikunitz/xz/reader.go @@ -1,4 +1,4 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/writer.go b/vendor/github.com/ulikunitz/xz/writer.go index 6b3a66620..f693e0aef 100644 --- a/vendor/github.com/ulikunitz/xz/writer.go +++ b/vendor/github.com/ulikunitz/xz/writer.go @@ -1,4 +1,4 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/vbatts/tar-split/tar/storage/getter.go b/vendor/github.com/vbatts/tar-split/tar/storage/getter.go index ae11f8ffd..9fed24aa8 100644 --- a/vendor/github.com/vbatts/tar-split/tar/storage/getter.go +++ b/vendor/github.com/vbatts/tar-split/tar/storage/getter.go @@ -92,11 +92,12 @@ func NewDiscardFilePutter() FilePutter { } type bitBucketFilePutter struct { + buffer [32 * 1024]byte // 32 kB is the buffer size currently used by io.Copy, as of August 2021. 
} func (bbfp *bitBucketFilePutter) Put(name string, r io.Reader) (int64, []byte, error) { c := crc64.New(CRCTable) - i, err := io.Copy(c, r) + i, err := io.CopyBuffer(c, r, bbfp.buffer[:]) return i, c.Sum(nil), err } diff --git a/vendor/github.com/willf/bitset/.travis.yml b/vendor/github.com/willf/bitset/.travis.yml deleted file mode 100644 index 094aa5ce0..000000000 --- a/vendor/github.com/willf/bitset/.travis.yml +++ /dev/null @@ -1,37 +0,0 @@ -language: go - -sudo: false - -branches: - except: - - release - -branches: - only: - - master - - travis - -go: - - "1.11.x" - - tip - -matrix: - allow_failures: - - go: tip - -before_install: - - if [ -n "$GH_USER" ]; then git config --global github.user ${GH_USER}; fi; - - if [ -n "$GH_TOKEN" ]; then git config --global github.token ${GH_TOKEN}; fi; - - go get github.com/mattn/goveralls - -before_script: - - make deps - -script: - - make qa - -after_failure: - - cat ./target/test/report.xml - -after_success: - - if [ "$TRAVIS_GO_VERSION" = "1.11.1" ]; then $HOME/gopath/bin/goveralls -covermode=count -coverprofile=target/report/coverage.out -service=travis-ci; fi; diff --git a/vendor/github.com/willf/bitset/LICENSE b/vendor/github.com/willf/bitset/LICENSE deleted file mode 100644 index 59cab8a93..000000000 --- a/vendor/github.com/willf/bitset/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2014 Will Fitzgerald. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
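The tar-split hunk above replaces `io.Copy` with `io.CopyBuffer` plus a buffer stored on the putter, so the 32 KiB scratch space is allocated once per putter rather than on every `Put` call inside `io.Copy`. A standalone sketch of the same pattern; the type and the CRC table choice here are illustrative, as the vendored code uses its own package-level `CRCTable`:

```go
package main

import (
	"bytes"
	"fmt"
	"hash/crc64"
	"io"
)

// discardPutter mimics bitBucketFilePutter: it hashes and discards the
// stream, reusing one 32 KiB buffer across calls.
type discardPutter struct {
	buffer [32 * 1024]byte
}

func (p *discardPutter) Put(r io.Reader) (int64, []byte, error) {
	c := crc64.New(crc64.MakeTable(crc64.ISO))
	// io.CopyBuffer uses the supplied buffer instead of allocating one.
	n, err := io.CopyBuffer(c, r, p.buffer[:])
	return n, c.Sum(nil), err
}

func main() {
	p := &discardPutter{}
	n, sum, err := p.Put(bytes.NewReader([]byte("hello tar-split")))
	fmt.Printf("n=%d crc=%x err=%v\n", n, sum, err)
}
```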
diff --git a/vendor/github.com/willf/bitset/README.md b/vendor/github.com/willf/bitset/README.md deleted file mode 100644 index 50338e71d..000000000 --- a/vendor/github.com/willf/bitset/README.md +++ /dev/null @@ -1,94 +0,0 @@ -# bitset - -*Go language library to map between non-negative integers and boolean values* - -[![Test](https://github.com/willf/bitset/workflows/Test/badge.svg)](https://github.com/willf/bitset/actions?query=workflow%3ATest) -[![Master Coverage Status](https://coveralls.io/repos/willf/bitset/badge.svg?branch=master&service=github)](https://coveralls.io/github/willf/bitset?branch=master) -[![Go Report Card](https://goreportcard.com/badge/github.com/willf/bitset)](https://goreportcard.com/report/github.com/willf/bitset) -[![PkgGoDev](https://pkg.go.dev/badge/github.com/willf/bitset?tab=doc)](https://pkg.go.dev/github.com/willf/bitset?tab=doc) - - -## Description - -Package bitset implements bitsets, a mapping between non-negative integers and boolean values. -It should be more efficient than map[uint] bool. - -It provides methods for setting, clearing, flipping, and testing individual integers. - -But it also provides set intersection, union, difference, complement, and symmetric operations, as well as tests to check whether any, all, or no bits are set, and querying a bitset's current length and number of positive bits. - -BitSets are expanded to the size of the largest set bit; the memory allocation is approximately Max bits, where Max is the largest set bit. BitSets are never shrunk. On creation, a hint can be given for the number of bits that will be used. - -Many of the methods, including Set, Clear, and Flip, return a BitSet pointer, which allows for chaining. - -### Example use: - -```go -package main - -import ( - "fmt" - "math/rand" - - "github.com/willf/bitset" -) - -func main() { - fmt.Printf("Hello from BitSet!\n") - var b bitset.BitSet - // play some Go Fish - for i := 0; i < 100; i++ { - card1 := uint(rand.Intn(52)) - card2 := uint(rand.Intn(52)) - b.Set(card1) - if b.Test(card2) { - fmt.Println("Go Fish!") - } - b.Clear(card1) - } - - // Chaining - b.Set(10).Set(11) - - for i, e := b.NextSet(0); e; i, e = b.NextSet(i + 1) { - fmt.Println("The following bit is set:", i) - } - if b.Intersection(bitset.New(100).Set(10)).Count() == 1 { - fmt.Println("Intersection works.") - } else { - fmt.Println("Intersection doesn't work???") - } -} -``` - -As an alternative to BitSets, one should check out the 'big' package, which provides a (less set-theoretical) view of bitsets. - -Package documentation is at: https://pkg.go.dev/github.com/willf/bitset?tab=doc - -## Memory Usage - -The memory usage of a bitset using N bits is at least N/8 bytes. The number of bits in a bitset is at least as large as one plus the greatest bit index you have accessed. Thus it is possible to run out of memory while using a bitset. If you have lots of bits, you might prefer compressed bitsets, like the [Roaring bitmaps](http://roaringbitmap.org) and its [Go implementation](https://github.com/RoaringBitmap/roaring). - -## Implementation Note - -Go 1.9 introduced a native `math/bits` library. We provide backward compatibility to Go 1.7, which might be removed. - -It is possible that a later version will match the `math/bits` return signature for counts (which is `int`, rather than our library's `unit64`). If so, the version will be bumped. 
- -## Installation - -```bash -go get github.com/willf/bitset -``` - -## Contributing - -If you wish to contribute to this project, please branch and issue a pull request against master ("[GitHub Flow](https://guides.github.com/introduction/flow/)") - -## Running all tests - -Before committing the code, please check if it passes tests, has adequate coverage, etc. -```bash -go test -go test -cover -``` diff --git a/vendor/github.com/willf/bitset/azure-pipelines.yml b/vendor/github.com/willf/bitset/azure-pipelines.yml deleted file mode 100644 index f9b295918..000000000 --- a/vendor/github.com/willf/bitset/azure-pipelines.yml +++ /dev/null @@ -1,39 +0,0 @@ -# Go -# Build your Go project. -# Add steps that test, save build artifacts, deploy, and more: -# https://docs.microsoft.com/azure/devops/pipelines/languages/go - -trigger: -- master - -pool: - vmImage: 'Ubuntu-16.04' - -variables: - GOBIN: '$(GOPATH)/bin' # Go binaries path - GOROOT: '/usr/local/go1.11' # Go installation path - GOPATH: '$(system.defaultWorkingDirectory)/gopath' # Go workspace path - modulePath: '$(GOPATH)/src/github.com/$(build.repository.name)' # Path to the module's code - -steps: -- script: | - mkdir -p '$(GOBIN)' - mkdir -p '$(GOPATH)/pkg' - mkdir -p '$(modulePath)' - shopt -s extglob - shopt -s dotglob - mv !(gopath) '$(modulePath)' - echo '##vso[task.prependpath]$(GOBIN)' - echo '##vso[task.prependpath]$(GOROOT)/bin' - displayName: 'Set up the Go workspace' - -- script: | - go version - go get -v -t -d ./... - if [ -f Gopkg.toml ]; then - curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh - dep ensure - fi - go build -v . - workingDirectory: '$(modulePath)' - displayName: 'Get dependencies, then build' diff --git a/vendor/github.com/willf/bitset/bitset.go b/vendor/github.com/willf/bitset/bitset.go deleted file mode 100644 index 21e889da2..000000000 --- a/vendor/github.com/willf/bitset/bitset.go +++ /dev/null @@ -1,931 +0,0 @@ -/* -Package bitset implements bitsets, a mapping -between non-negative integers and boolean values. It should be more -efficient than map[uint] bool. - -It provides methods for setting, clearing, flipping, and testing -individual integers. - -But it also provides set intersection, union, difference, -complement, and symmetric operations, as well as tests to -check whether any, all, or no bits are set, and querying a -bitset's current length and number of positive bits. - -BitSets are expanded to the size of the largest set bit; the -memory allocation is approximately Max bits, where Max is -the largest set bit. BitSets are never shrunk. On creation, -a hint can be given for the number of bits that will be used. - -Many of the methods, including Set,Clear, and Flip, return -a BitSet pointer, which allows for chaining. - -Example use: - - import "bitset" - var b BitSet - b.Set(10).Set(11) - if b.Test(1000) { - b.Clear(1000) - } - if B.Intersection(bitset.New(100).Set(10)).Count() > 1 { - fmt.Println("Intersection works.") - } - -As an alternative to BitSets, one should check out the 'big' package, -which provides a (less set-theoretical) view of bitsets. 
- -*/ -package bitset - -import ( - "bufio" - "bytes" - "encoding/base64" - "encoding/binary" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" -) - -// the wordSize of a bit set -const wordSize = uint(64) - -// log2WordSize is lg(wordSize) -const log2WordSize = uint(6) - -// allBits has every bit set -const allBits uint64 = 0xffffffffffffffff - -// default binary BigEndian -var binaryOrder binary.ByteOrder = binary.BigEndian - -// default json encoding base64.URLEncoding -var base64Encoding = base64.URLEncoding - -// Base64StdEncoding Marshal/Unmarshal BitSet with base64.StdEncoding(Default: base64.URLEncoding) -func Base64StdEncoding() { base64Encoding = base64.StdEncoding } - -// LittleEndian Marshal/Unmarshal Binary as Little Endian(Default: binary.BigEndian) -func LittleEndian() { binaryOrder = binary.LittleEndian } - -// A BitSet is a set of bits. The zero value of a BitSet is an empty set of length 0. -type BitSet struct { - length uint - set []uint64 -} - -// Error is used to distinguish errors (panics) generated in this package. -type Error string - -// safeSet will fixup b.set to be non-nil and return the field value -func (b *BitSet) safeSet() []uint64 { - if b.set == nil { - b.set = make([]uint64, wordsNeeded(0)) - } - return b.set -} - -// From is a constructor used to create a BitSet from an array of integers -func From(buf []uint64) *BitSet { - return &BitSet{uint(len(buf)) * 64, buf} -} - -// Bytes returns the bitset as array of integers -func (b *BitSet) Bytes() []uint64 { - return b.set -} - -// wordsNeeded calculates the number of words needed for i bits -func wordsNeeded(i uint) int { - if i > (Cap() - wordSize + 1) { - return int(Cap() >> log2WordSize) - } - return int((i + (wordSize - 1)) >> log2WordSize) -} - -// New creates a new BitSet with a hint that length bits will be required -func New(length uint) (bset *BitSet) { - defer func() { - if r := recover(); r != nil { - bset = &BitSet{ - 0, - make([]uint64, 0), - } - } - }() - - bset = &BitSet{ - length, - make([]uint64, wordsNeeded(length)), - } - - return bset -} - -// Cap returns the total possible capacity, or number of bits -func Cap() uint { - return ^uint(0) -} - -// Len returns the number of bits in the BitSet. -// Note the difference to method Count, see example. -func (b *BitSet) Len() uint { - return b.length -} - -// extendSetMaybe adds additional words to incorporate new bits if needed -func (b *BitSet) extendSetMaybe(i uint) { - if i >= b.length { // if we need more bits, make 'em - if i >= Cap() { - panic("You are exceeding the capacity") - } - nsize := wordsNeeded(i + 1) - if b.set == nil { - b.set = make([]uint64, nsize) - } else if cap(b.set) >= nsize { - b.set = b.set[:nsize] // fast resize - } else if len(b.set) < nsize { - newset := make([]uint64, nsize, 2*nsize) // increase capacity 2x - copy(newset, b.set) - b.set = newset - } - b.length = i + 1 - } -} - -// Test whether bit i is set. -func (b *BitSet) Test(i uint) bool { - if i >= b.length { - return false - } - return b.set[i>>log2WordSize]&(1<<(i&(wordSize-1))) != 0 -} - -// Set bit i to 1, the capacity of the bitset is automatically -// increased accordingly. -// If i>= Cap(), this function will panic. -// Warning: using a very large value for 'i' -// may lead to a memory shortage and a panic: the caller is responsible -// for providing sensible parameters in line with their memory capacity. 
-func (b *BitSet) Set(i uint) *BitSet { - b.extendSetMaybe(i) - b.set[i>>log2WordSize] |= 1 << (i & (wordSize - 1)) - return b -} - -// Clear bit i to 0 -func (b *BitSet) Clear(i uint) *BitSet { - if i >= b.length { - return b - } - b.set[i>>log2WordSize] &^= 1 << (i & (wordSize - 1)) - return b -} - -// SetTo sets bit i to value. -// If i>= Cap(), this function will panic. -// Warning: using a very large value for 'i' -// may lead to a memory shortage and a panic: the caller is responsible -// for providing sensible parameters in line with their memory capacity. -func (b *BitSet) SetTo(i uint, value bool) *BitSet { - if value { - return b.Set(i) - } - return b.Clear(i) -} - -// Flip bit at i. -// If i>= Cap(), this function will panic. -// Warning: using a very large value for 'i' -// may lead to a memory shortage and a panic: the caller is responsible -// for providing sensible parameters in line with their memory capacity. -func (b *BitSet) Flip(i uint) *BitSet { - if i >= b.length { - return b.Set(i) - } - b.set[i>>log2WordSize] ^= 1 << (i & (wordSize - 1)) - return b -} - -// Shrink shrinks BitSet so that the provided value is the last possible -// set value. It clears all bits > the provided index and reduces the size -// and length of the set. -// -// Note that the parameter value is not the new length in bits: it is the -// maximal value that can be stored in the bitset after the function call. -// The new length in bits is the parameter value + 1. Thus it is not possible -// to use this function to set the length to 0, the minimal value of the length -// after this function call is 1. -// -// A new slice is allocated to store the new bits, so you may see an increase in -// memory usage until the GC runs. Normally this should not be a problem, but if you -// have an extremely large BitSet its important to understand that the old BitSet will -// remain in memory until the GC frees it. -func (b *BitSet) Shrink(lastbitindex uint) *BitSet { - length := lastbitindex + 1 - idx := wordsNeeded(length) - if idx > len(b.set) { - return b - } - shrunk := make([]uint64, idx) - copy(shrunk, b.set[:idx]) - b.set = shrunk - b.length = length - b.set[idx-1] &= (allBits >> (uint64(64) - uint64(length&(wordSize-1)))) - return b -} - -// Compact shrinks BitSet to so that we preserve all set bits, while minimizing -// memory usage. Compact calls Shrink. -func (b *BitSet) Compact() *BitSet { - idx := len(b.set) - 1 - for ; idx >= 0 && b.set[idx] == 0; idx-- { - } - newlength := uint((idx + 1) << log2WordSize) - if newlength >= b.length { - return b // nothing to do - } - if newlength > 0 { - return b.Shrink(newlength - 1) - } - // We preserve one word - return b.Shrink(63) -} - -// InsertAt takes an index which indicates where a bit should be -// inserted. Then it shifts all the bits in the set to the left by 1, starting -// from the given index position, and sets the index position to 0. -// -// Depending on the size of your BitSet, and where you are inserting the new entry, -// this method could be extremely slow and in some cases might cause the entire BitSet -// to be recopied. 
-func (b *BitSet) InsertAt(idx uint) *BitSet { - insertAtElement := (idx >> log2WordSize) - - // if length of set is a multiple of wordSize we need to allocate more space first - if b.isLenExactMultiple() { - b.set = append(b.set, uint64(0)) - } - - var i uint - for i = uint(len(b.set) - 1); i > insertAtElement; i-- { - // all elements above the position where we want to insert can simply by shifted - b.set[i] <<= 1 - - // we take the most significant bit of the previous element and set it as - // the least significant bit of the current element - b.set[i] |= (b.set[i-1] & 0x8000000000000000) >> 63 - } - - // generate a mask to extract the data that we need to shift left - // within the element where we insert a bit - dataMask := ^(uint64(1)< 0x40000 { - buffer.WriteString("...") - break - } - buffer.WriteString(strconv.FormatInt(int64(i), 10)) - i, e = b.NextSet(i + 1) - if e { - buffer.WriteString(",") - } - } - buffer.WriteString("}") - return buffer.String() -} - -// DeleteAt deletes the bit at the given index position from -// within the bitset -// All the bits residing on the left of the deleted bit get -// shifted right by 1 -// The running time of this operation may potentially be -// relatively slow, O(length) -func (b *BitSet) DeleteAt(i uint) *BitSet { - // the index of the slice element where we'll delete a bit - deleteAtElement := i >> log2WordSize - - // generate a mask for the data that needs to be shifted right - // within that slice element that gets modified - dataMask := ^((uint64(1) << (i & (wordSize - 1))) - 1) - - // extract the data that we'll shift right from the slice element - data := b.set[deleteAtElement] & dataMask - - // set the masked area to 0 while leaving the rest as it is - b.set[deleteAtElement] &= ^dataMask - - // shift the previously extracted data to the right and then - // set it in the previously masked area - b.set[deleteAtElement] |= (data >> 1) & dataMask - - // loop over all the consecutive slice elements to copy each - // lowest bit into the highest position of the previous element, - // then shift the entire content to the right by 1 - for i := int(deleteAtElement) + 1; i < len(b.set); i++ { - b.set[i-1] |= (b.set[i] & 1) << 63 - b.set[i] >>= 1 - } - - b.length = b.length - 1 - - return b -} - -// NextSet returns the next bit set from the specified index, -// including possibly the current index -// along with an error code (true = valid, false = no set bit found) -// for i,e := v.NextSet(0); e; i,e = v.NextSet(i + 1) {...} -// -// Users concerned with performance may want to use NextSetMany to -// retrieve several values at once. -func (b *BitSet) NextSet(i uint) (uint, bool) { - x := int(i >> log2WordSize) - if x >= len(b.set) { - return 0, false - } - w := b.set[x] - w = w >> (i & (wordSize - 1)) - if w != 0 { - return i + trailingZeroes64(w), true - } - x = x + 1 - for x < len(b.set) { - if b.set[x] != 0 { - return uint(x)*wordSize + trailingZeroes64(b.set[x]), true - } - x = x + 1 - - } - return 0, false -} - -// NextSetMany returns many next bit sets from the specified index, -// including possibly the current index and up to cap(buffer). 
-// If the returned slice has len zero, then no more set bits were found -// -// buffer := make([]uint, 256) // this should be reused -// j := uint(0) -// j, buffer = bitmap.NextSetMany(j, buffer) -// for ; len(buffer) > 0; j, buffer = bitmap.NextSetMany(j,buffer) { -// for k := range buffer { -// do something with buffer[k] -// } -// j += 1 -// } -// -// -// It is possible to retrieve all set bits as follow: -// -// indices := make([]uint, bitmap.Count()) -// bitmap.NextSetMany(0, indices) -// -// However if bitmap.Count() is large, it might be preferable to -// use several calls to NextSetMany, for performance reasons. -func (b *BitSet) NextSetMany(i uint, buffer []uint) (uint, []uint) { - myanswer := buffer - capacity := cap(buffer) - x := int(i >> log2WordSize) - if x >= len(b.set) || capacity == 0 { - return 0, myanswer[:0] - } - skip := i & (wordSize - 1) - word := b.set[x] >> skip - myanswer = myanswer[:capacity] - size := int(0) - for word != 0 { - r := trailingZeroes64(word) - t := word & ((^word) + 1) - myanswer[size] = r + i - size++ - if size == capacity { - goto End - } - word = word ^ t - } - x++ - for idx, word := range b.set[x:] { - for word != 0 { - r := trailingZeroes64(word) - t := word & ((^word) + 1) - myanswer[size] = r + (uint(x+idx) << 6) - size++ - if size == capacity { - goto End - } - word = word ^ t - } - } -End: - if size > 0 { - return myanswer[size-1], myanswer[:size] - } - return 0, myanswer[:0] -} - -// NextClear returns the next clear bit from the specified index, -// including possibly the current index -// along with an error code (true = valid, false = no bit found i.e. all bits are set) -func (b *BitSet) NextClear(i uint) (uint, bool) { - x := int(i >> log2WordSize) - if x >= len(b.set) { - return 0, false - } - w := b.set[x] - w = w >> (i & (wordSize - 1)) - wA := allBits >> (i & (wordSize - 1)) - index := i + trailingZeroes64(^w) - if w != wA && index < b.length { - return index, true - } - x++ - for x < len(b.set) { - index = uint(x)*wordSize + trailingZeroes64(^b.set[x]) - if b.set[x] != allBits && index < b.length { - return index, true - } - x++ - } - return 0, false -} - -// ClearAll clears the entire BitSet -func (b *BitSet) ClearAll() *BitSet { - if b != nil && b.set != nil { - for i := range b.set { - b.set[i] = 0 - } - } - return b -} - -// wordCount returns the number of words used in a bit set -func (b *BitSet) wordCount() int { - return len(b.set) -} - -// Clone this BitSet -func (b *BitSet) Clone() *BitSet { - c := New(b.length) - if b.set != nil { // Clone should not modify current object - copy(c.set, b.set) - } - return c -} - -// Copy into a destination BitSet -// Returning the size of the destination BitSet -// like array copy -func (b *BitSet) Copy(c *BitSet) (count uint) { - if c == nil { - return - } - if b.set != nil { // Copy should not modify current object - copy(c.set, b.set) - } - count = c.length - if b.length < c.length { - count = b.length - } - return -} - -// Count (number of set bits). -// Also known as "popcount" or "popularity count". -func (b *BitSet) Count() uint { - if b != nil && b.set != nil { - return uint(popcntSlice(b.set)) - } - return 0 -} - -// Equal tests the equivalence of two BitSets. 
-// False if they are of different sizes, otherwise true -// only if all the same bits are set -func (b *BitSet) Equal(c *BitSet) bool { - if c == nil || b == nil { - return c == b - } - if b.length != c.length { - return false - } - if b.length == 0 { // if they have both length == 0, then could have nil set - return true - } - // testing for equality shoud not transform the bitset (no call to safeSet) - - for p, v := range b.set { - if c.set[p] != v { - return false - } - } - return true -} - -func panicIfNull(b *BitSet) { - if b == nil { - panic(Error("BitSet must not be null")) - } -} - -// Difference of base set and other set -// This is the BitSet equivalent of &^ (and not) -func (b *BitSet) Difference(compare *BitSet) (result *BitSet) { - panicIfNull(b) - panicIfNull(compare) - result = b.Clone() // clone b (in case b is bigger than compare) - l := int(compare.wordCount()) - if l > int(b.wordCount()) { - l = int(b.wordCount()) - } - for i := 0; i < l; i++ { - result.set[i] = b.set[i] &^ compare.set[i] - } - return -} - -// DifferenceCardinality computes the cardinality of the differnce -func (b *BitSet) DifferenceCardinality(compare *BitSet) uint { - panicIfNull(b) - panicIfNull(compare) - l := int(compare.wordCount()) - if l > int(b.wordCount()) { - l = int(b.wordCount()) - } - cnt := uint64(0) - cnt += popcntMaskSlice(b.set[:l], compare.set[:l]) - cnt += popcntSlice(b.set[l:]) - return uint(cnt) -} - -// InPlaceDifference computes the difference of base set and other set -// This is the BitSet equivalent of &^ (and not) -func (b *BitSet) InPlaceDifference(compare *BitSet) { - panicIfNull(b) - panicIfNull(compare) - l := int(compare.wordCount()) - if l > int(b.wordCount()) { - l = int(b.wordCount()) - } - for i := 0; i < l; i++ { - b.set[i] &^= compare.set[i] - } -} - -// Convenience function: return two bitsets ordered by -// increasing length. Note: neither can be nil -func sortByLength(a *BitSet, b *BitSet) (ap *BitSet, bp *BitSet) { - if a.length <= b.length { - ap, bp = a, b - } else { - ap, bp = b, a - } - return -} - -// Intersection of base set and other set -// This is the BitSet equivalent of & (and) -func (b *BitSet) Intersection(compare *BitSet) (result *BitSet) { - panicIfNull(b) - panicIfNull(compare) - b, compare = sortByLength(b, compare) - result = New(b.length) - for i, word := range b.set { - result.set[i] = word & compare.set[i] - } - return -} - -// IntersectionCardinality computes the cardinality of the union -func (b *BitSet) IntersectionCardinality(compare *BitSet) uint { - panicIfNull(b) - panicIfNull(compare) - b, compare = sortByLength(b, compare) - cnt := popcntAndSlice(b.set, compare.set) - return uint(cnt) -} - -// InPlaceIntersection destructively computes the intersection of -// base set and the compare set. 
-// This is the BitSet equivalent of & (and) -func (b *BitSet) InPlaceIntersection(compare *BitSet) { - panicIfNull(b) - panicIfNull(compare) - l := int(compare.wordCount()) - if l > int(b.wordCount()) { - l = int(b.wordCount()) - } - for i := 0; i < l; i++ { - b.set[i] &= compare.set[i] - } - for i := l; i < len(b.set); i++ { - b.set[i] = 0 - } - if compare.length > 0 { - b.extendSetMaybe(compare.length - 1) - } -} - -// Union of base set and other set -// This is the BitSet equivalent of | (or) -func (b *BitSet) Union(compare *BitSet) (result *BitSet) { - panicIfNull(b) - panicIfNull(compare) - b, compare = sortByLength(b, compare) - result = compare.Clone() - for i, word := range b.set { - result.set[i] = word | compare.set[i] - } - return -} - -// UnionCardinality computes the cardinality of the uniton of the base set -// and the compare set. -func (b *BitSet) UnionCardinality(compare *BitSet) uint { - panicIfNull(b) - panicIfNull(compare) - b, compare = sortByLength(b, compare) - cnt := popcntOrSlice(b.set, compare.set) - if len(compare.set) > len(b.set) { - cnt += popcntSlice(compare.set[len(b.set):]) - } - return uint(cnt) -} - -// InPlaceUnion creates the destructive union of base set and compare set. -// This is the BitSet equivalent of | (or). -func (b *BitSet) InPlaceUnion(compare *BitSet) { - panicIfNull(b) - panicIfNull(compare) - l := int(compare.wordCount()) - if l > int(b.wordCount()) { - l = int(b.wordCount()) - } - if compare.length > 0 { - b.extendSetMaybe(compare.length - 1) - } - for i := 0; i < l; i++ { - b.set[i] |= compare.set[i] - } - if len(compare.set) > l { - for i := l; i < len(compare.set); i++ { - b.set[i] = compare.set[i] - } - } -} - -// SymmetricDifference of base set and other set -// This is the BitSet equivalent of ^ (xor) -func (b *BitSet) SymmetricDifference(compare *BitSet) (result *BitSet) { - panicIfNull(b) - panicIfNull(compare) - b, compare = sortByLength(b, compare) - // compare is bigger, so clone it - result = compare.Clone() - for i, word := range b.set { - result.set[i] = word ^ compare.set[i] - } - return -} - -// SymmetricDifferenceCardinality computes the cardinality of the symmetric difference -func (b *BitSet) SymmetricDifferenceCardinality(compare *BitSet) uint { - panicIfNull(b) - panicIfNull(compare) - b, compare = sortByLength(b, compare) - cnt := popcntXorSlice(b.set, compare.set) - if len(compare.set) > len(b.set) { - cnt += popcntSlice(compare.set[len(b.set):]) - } - return uint(cnt) -} - -// InPlaceSymmetricDifference creates the destructive SymmetricDifference of base set and other set -// This is the BitSet equivalent of ^ (xor) -func (b *BitSet) InPlaceSymmetricDifference(compare *BitSet) { - panicIfNull(b) - panicIfNull(compare) - l := int(compare.wordCount()) - if l > int(b.wordCount()) { - l = int(b.wordCount()) - } - if compare.length > 0 { - b.extendSetMaybe(compare.length - 1) - } - for i := 0; i < l; i++ { - b.set[i] ^= compare.set[i] - } - if len(compare.set) > l { - for i := l; i < len(compare.set); i++ { - b.set[i] = compare.set[i] - } - } -} - -// Is the length an exact multiple of word sizes? 
-func (b *BitSet) isLenExactMultiple() bool {
-	return b.length%wordSize == 0
-}
-
-// Clean last word by setting unused bits to 0
-func (b *BitSet) cleanLastWord() {
-	if !b.isLenExactMultiple() {
-		b.set[len(b.set)-1] &= allBits >> (wordSize - b.length%wordSize)
-	}
-}
-
-// Complement computes the (local) complement of a bitset (up to length bits)
-func (b *BitSet) Complement() (result *BitSet) {
-	panicIfNull(b)
-	result = New(b.length)
-	for i, word := range b.set {
-		result.set[i] = ^word
-	}
-	result.cleanLastWord()
-	return
-}
-
-// All returns true if all bits are set, false otherwise. Returns true for
-// empty sets.
-func (b *BitSet) All() bool {
-	panicIfNull(b)
-	return b.Count() == b.length
-}
-
-// None returns true if no bit is set, false otherwise. Returns true for
-// empty sets.
-func (b *BitSet) None() bool {
-	panicIfNull(b)
-	if b != nil && b.set != nil {
-		for _, word := range b.set {
-			if word > 0 {
-				return false
-			}
-		}
-		return true
-	}
-	return true
-}
-
-// Any returns true if any bit is set, false otherwise
-func (b *BitSet) Any() bool {
-	panicIfNull(b)
-	return !b.None()
-}
-
-// IsSuperSet returns true if this is a superset of the other set
-func (b *BitSet) IsSuperSet(other *BitSet) bool {
-	for i, e := other.NextSet(0); e; i, e = other.NextSet(i + 1) {
-		if !b.Test(i) {
-			return false
-		}
-	}
-	return true
-}
-
-// IsStrictSuperSet returns true if this is a strict superset of the other set
-func (b *BitSet) IsStrictSuperSet(other *BitSet) bool {
-	return b.Count() > other.Count() && b.IsSuperSet(other)
-}
-
-// DumpAsBits dumps a bit set as a string of bits
-func (b *BitSet) DumpAsBits() string {
-	if b.set == nil {
-		return "."
-	}
-	buffer := bytes.NewBufferString("")
-	i := len(b.set) - 1
-	for ; i >= 0; i-- {
-		fmt.Fprintf(buffer, "%064b.", b.set[i])
-	}
-	return buffer.String()
-}
-
-// BinaryStorageSize returns the binary storage requirements
-func (b *BitSet) BinaryStorageSize() int {
-	return binary.Size(uint64(0)) + binary.Size(b.set)
-}
-
-// WriteTo writes a BitSet to a stream
-func (b *BitSet) WriteTo(stream io.Writer) (int64, error) {
-	length := uint64(b.length)
-
-	// Write length
-	err := binary.Write(stream, binaryOrder, length)
-	if err != nil {
-		return 0, err
-	}
-
-	// Write set
-	err = binary.Write(stream, binaryOrder, b.set)
-	return int64(b.BinaryStorageSize()), err
-}
-
-// ReadFrom reads a BitSet from a stream written using WriteTo
-func (b *BitSet) ReadFrom(stream io.Reader) (int64, error) {
-	var length uint64
-
-	// Read length first
-	err := binary.Read(stream, binaryOrder, &length)
-	if err != nil {
-		return 0, err
-	}
-	newset := New(uint(length))
-
-	if uint64(newset.length) != length {
-		return 0, errors.New("unmarshalling error: type mismatch")
-	}
-
-	// Read remaining bytes as set
-	err = binary.Read(stream, binaryOrder, newset.set)
-	if err != nil {
-		return 0, err
-	}
-
-	*b = *newset
-	return int64(b.BinaryStorageSize()), nil
-}
-
-// MarshalBinary encodes a BitSet into a binary form and returns the result.
-func (b *BitSet) MarshalBinary() ([]byte, error) {
-	var buf bytes.Buffer
-	writer := bufio.NewWriter(&buf)
-
-	_, err := b.WriteTo(writer)
-	if err != nil {
-		return []byte{}, err
-	}
-
-	err = writer.Flush()
-
-	return buf.Bytes(), err
-}
-
-// UnmarshalBinary decodes the binary form generated by MarshalBinary.
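// Editor's aside, a hedged sketch and not part of the patch: a binary
// round-trip through the WriteTo/ReadFrom pair defined above. The stream
// layout is the uint64 length followed by the raw words, which is what
// BinaryStorageSize accounts for. Assumes the willf/bitset API.
package main

import (
	"bytes"
	"fmt"

	"github.com/willf/bitset"
)

func main() {
	src := bitset.New(100)
	src.Set(9)
	src.Set(99)

	var buf bytes.Buffer
	if _, err := src.WriteTo(&buf); err != nil {
		panic(err)
	}

	dst := bitset.New(0)
	if _, err := dst.ReadFrom(&buf); err != nil {
		panic(err)
	}

	fmt.Println(src.Equal(dst)) // true
}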
-func (b *BitSet) UnmarshalBinary(data []byte) error {
-	buf := bytes.NewReader(data)
-	reader := bufio.NewReader(buf)
-
-	_, err := b.ReadFrom(reader)
-
-	return err
-}
-
-// MarshalJSON marshals a BitSet as a JSON structure
-func (b *BitSet) MarshalJSON() ([]byte, error) {
-	buffer := bytes.NewBuffer(make([]byte, 0, b.BinaryStorageSize()))
-	_, err := b.WriteTo(buffer)
-	if err != nil {
-		return nil, err
-	}
-
-	// URLEncode all bytes
-	return json.Marshal(base64Encoding.EncodeToString(buffer.Bytes()))
-}
-
-// UnmarshalJSON unmarshals a BitSet from JSON created using MarshalJSON
-func (b *BitSet) UnmarshalJSON(data []byte) error {
-	// Unmarshal as string
-	var s string
-	err := json.Unmarshal(data, &s)
-	if err != nil {
-		return err
-	}
-
-	// URLDecode string
-	buf, err := base64Encoding.DecodeString(s)
-	if err != nil {
-		return err
-	}
-
-	_, err = b.ReadFrom(bytes.NewReader(buf))
-	return err
-}
diff --git a/vendor/github.com/willf/bitset/go.mod b/vendor/github.com/willf/bitset/go.mod
deleted file mode 100644
index 583ecab78..000000000
--- a/vendor/github.com/willf/bitset/go.mod
+++ /dev/null
@@ -1,3 +0,0 @@
-module github.com/willf/bitset
-
-go 1.14
diff --git a/vendor/github.com/willf/bitset/popcnt.go b/vendor/github.com/willf/bitset/popcnt.go
deleted file mode 100644
index 76577a838..000000000
--- a/vendor/github.com/willf/bitset/popcnt.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package bitset
-
-// bit population count, taken from
-// https://code.google.com/p/go/issues/detail?id=4988#c11
-// credit: https://code.google.com/u/arnehormann/
-func popcount(x uint64) (n uint64) {
-	x -= (x >> 1) & 0x5555555555555555
-	x = (x>>2)&0x3333333333333333 + x&0x3333333333333333
-	x += x >> 4
-	x &= 0x0f0f0f0f0f0f0f0f
-	x *= 0x0101010101010101
-	return x >> 56
-}
-
-func popcntSliceGo(s []uint64) uint64 {
-	cnt := uint64(0)
-	for _, x := range s {
-		cnt += popcount(x)
-	}
-	return cnt
-}
-
-func popcntMaskSliceGo(s, m []uint64) uint64 {
-	cnt := uint64(0)
-	for i := range s {
-		cnt += popcount(s[i] &^ m[i])
-	}
-	return cnt
-}
-
-func popcntAndSliceGo(s, m []uint64) uint64 {
-	cnt := uint64(0)
-	for i := range s {
-		cnt += popcount(s[i] & m[i])
-	}
-	return cnt
-}
-
-func popcntOrSliceGo(s, m []uint64) uint64 {
-	cnt := uint64(0)
-	for i := range s {
-		cnt += popcount(s[i] | m[i])
-	}
-	return cnt
-}
-
-func popcntXorSliceGo(s, m []uint64) uint64 {
-	cnt := uint64(0)
-	for i := range s {
-		cnt += popcount(s[i] ^ m[i])
-	}
-	return cnt
-}
diff --git a/vendor/github.com/willf/bitset/popcnt_19.go b/vendor/github.com/willf/bitset/popcnt_19.go
deleted file mode 100644
index fc8ff4f36..000000000
--- a/vendor/github.com/willf/bitset/popcnt_19.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// +build go1.9
-
-package bitset
-
-import "math/bits"
-
-func popcntSlice(s []uint64) uint64 {
-	var cnt int
-	for _, x := range s {
-		cnt += bits.OnesCount64(x)
-	}
-	return uint64(cnt)
-}
-
-func popcntMaskSlice(s, m []uint64) uint64 {
-	var cnt int
-	for i := range s {
-		cnt += bits.OnesCount64(s[i] &^ m[i])
-	}
-	return uint64(cnt)
-}
-
-func popcntAndSlice(s, m []uint64) uint64 {
-	var cnt int
-	for i := range s {
-		cnt += bits.OnesCount64(s[i] & m[i])
-	}
-	return uint64(cnt)
-}
-
-func popcntOrSlice(s, m []uint64) uint64 {
-	var cnt int
-	for i := range s {
-		cnt += bits.OnesCount64(s[i] | m[i])
-	}
-	return uint64(cnt)
-}
-
-func popcntXorSlice(s, m []uint64) uint64 {
-	var cnt int
-	for i := range s {
-		cnt += bits.OnesCount64(s[i] ^ m[i])
-	}
-	return uint64(cnt)
-}
diff --git 
a/vendor/github.com/willf/bitset/popcnt_amd64.go b/vendor/github.com/willf/bitset/popcnt_amd64.go deleted file mode 100644 index 4cf64f24a..000000000 --- a/vendor/github.com/willf/bitset/popcnt_amd64.go +++ /dev/null @@ -1,68 +0,0 @@ -// +build !go1.9 -// +build amd64,!appengine - -package bitset - -// *** the following functions are defined in popcnt_amd64.s - -//go:noescape - -func hasAsm() bool - -// useAsm is a flag used to select the GO or ASM implementation of the popcnt function -var useAsm = hasAsm() - -//go:noescape - -func popcntSliceAsm(s []uint64) uint64 - -//go:noescape - -func popcntMaskSliceAsm(s, m []uint64) uint64 - -//go:noescape - -func popcntAndSliceAsm(s, m []uint64) uint64 - -//go:noescape - -func popcntOrSliceAsm(s, m []uint64) uint64 - -//go:noescape - -func popcntXorSliceAsm(s, m []uint64) uint64 - -func popcntSlice(s []uint64) uint64 { - if useAsm { - return popcntSliceAsm(s) - } - return popcntSliceGo(s) -} - -func popcntMaskSlice(s, m []uint64) uint64 { - if useAsm { - return popcntMaskSliceAsm(s, m) - } - return popcntMaskSliceGo(s, m) -} - -func popcntAndSlice(s, m []uint64) uint64 { - if useAsm { - return popcntAndSliceAsm(s, m) - } - return popcntAndSliceGo(s, m) -} - -func popcntOrSlice(s, m []uint64) uint64 { - if useAsm { - return popcntOrSliceAsm(s, m) - } - return popcntOrSliceGo(s, m) -} - -func popcntXorSlice(s, m []uint64) uint64 { - if useAsm { - return popcntXorSliceAsm(s, m) - } - return popcntXorSliceGo(s, m) -} diff --git a/vendor/github.com/willf/bitset/popcnt_amd64.s b/vendor/github.com/willf/bitset/popcnt_amd64.s deleted file mode 100644 index 666c0dcc1..000000000 --- a/vendor/github.com/willf/bitset/popcnt_amd64.s +++ /dev/null @@ -1,104 +0,0 @@ -// +build !go1.9 -// +build amd64,!appengine - -TEXT ·hasAsm(SB),4,$0-1 -MOVQ $1, AX -CPUID -SHRQ $23, CX -ANDQ $1, CX -MOVB CX, ret+0(FP) -RET - -#define POPCNTQ_DX_DX BYTE $0xf3; BYTE $0x48; BYTE $0x0f; BYTE $0xb8; BYTE $0xd2 - -TEXT ·popcntSliceAsm(SB),4,$0-32 -XORQ AX, AX -MOVQ s+0(FP), SI -MOVQ s_len+8(FP), CX -TESTQ CX, CX -JZ popcntSliceEnd -popcntSliceLoop: -BYTE $0xf3; BYTE $0x48; BYTE $0x0f; BYTE $0xb8; BYTE $0x16 // POPCNTQ (SI), DX -ADDQ DX, AX -ADDQ $8, SI -LOOP popcntSliceLoop -popcntSliceEnd: -MOVQ AX, ret+24(FP) -RET - -TEXT ·popcntMaskSliceAsm(SB),4,$0-56 -XORQ AX, AX -MOVQ s+0(FP), SI -MOVQ s_len+8(FP), CX -TESTQ CX, CX -JZ popcntMaskSliceEnd -MOVQ m+24(FP), DI -popcntMaskSliceLoop: -MOVQ (DI), DX -NOTQ DX -ANDQ (SI), DX -POPCNTQ_DX_DX -ADDQ DX, AX -ADDQ $8, SI -ADDQ $8, DI -LOOP popcntMaskSliceLoop -popcntMaskSliceEnd: -MOVQ AX, ret+48(FP) -RET - -TEXT ·popcntAndSliceAsm(SB),4,$0-56 -XORQ AX, AX -MOVQ s+0(FP), SI -MOVQ s_len+8(FP), CX -TESTQ CX, CX -JZ popcntAndSliceEnd -MOVQ m+24(FP), DI -popcntAndSliceLoop: -MOVQ (DI), DX -ANDQ (SI), DX -POPCNTQ_DX_DX -ADDQ DX, AX -ADDQ $8, SI -ADDQ $8, DI -LOOP popcntAndSliceLoop -popcntAndSliceEnd: -MOVQ AX, ret+48(FP) -RET - -TEXT ·popcntOrSliceAsm(SB),4,$0-56 -XORQ AX, AX -MOVQ s+0(FP), SI -MOVQ s_len+8(FP), CX -TESTQ CX, CX -JZ popcntOrSliceEnd -MOVQ m+24(FP), DI -popcntOrSliceLoop: -MOVQ (DI), DX -ORQ (SI), DX -POPCNTQ_DX_DX -ADDQ DX, AX -ADDQ $8, SI -ADDQ $8, DI -LOOP popcntOrSliceLoop -popcntOrSliceEnd: -MOVQ AX, ret+48(FP) -RET - -TEXT ·popcntXorSliceAsm(SB),4,$0-56 -XORQ AX, AX -MOVQ s+0(FP), SI -MOVQ s_len+8(FP), CX -TESTQ CX, CX -JZ popcntXorSliceEnd -MOVQ m+24(FP), DI -popcntXorSliceLoop: -MOVQ (DI), DX -XORQ (SI), DX -POPCNTQ_DX_DX -ADDQ DX, AX -ADDQ $8, SI -ADDQ $8, DI -LOOP popcntXorSliceLoop -popcntXorSliceEnd: -MOVQ AX, 
ret+48(FP) -RET diff --git a/vendor/github.com/willf/bitset/popcnt_generic.go b/vendor/github.com/willf/bitset/popcnt_generic.go deleted file mode 100644 index 21e0ff7b4..000000000 --- a/vendor/github.com/willf/bitset/popcnt_generic.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build !go1.9 -// +build !amd64 appengine - -package bitset - -func popcntSlice(s []uint64) uint64 { - return popcntSliceGo(s) -} - -func popcntMaskSlice(s, m []uint64) uint64 { - return popcntMaskSliceGo(s, m) -} - -func popcntAndSlice(s, m []uint64) uint64 { - return popcntAndSliceGo(s, m) -} - -func popcntOrSlice(s, m []uint64) uint64 { - return popcntOrSliceGo(s, m) -} - -func popcntXorSlice(s, m []uint64) uint64 { - return popcntXorSliceGo(s, m) -} diff --git a/vendor/github.com/willf/bitset/trailing_zeros_18.go b/vendor/github.com/willf/bitset/trailing_zeros_18.go deleted file mode 100644 index c52b61be9..000000000 --- a/vendor/github.com/willf/bitset/trailing_zeros_18.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build !go1.9 - -package bitset - -var deBruijn = [...]byte{ - 0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4, - 62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5, - 63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11, - 54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6, -} - -func trailingZeroes64(v uint64) uint { - return uint(deBruijn[((v&-v)*0x03f79d71b4ca8b09)>>58]) -} diff --git a/vendor/github.com/willf/bitset/trailing_zeros_19.go b/vendor/github.com/willf/bitset/trailing_zeros_19.go deleted file mode 100644 index 36a988e71..000000000 --- a/vendor/github.com/willf/bitset/trailing_zeros_19.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build go1.9 - -package bitset - -import "math/bits" - -func trailingZeroes64(v uint64) uint { - return uint(bits.TrailingZeros64(v)) -} diff --git a/vendor/go.etcd.io/bbolt/.gitignore b/vendor/go.etcd.io/bbolt/.gitignore index 3bcd8cbaf..18312f004 100644 --- a/vendor/go.etcd.io/bbolt/.gitignore +++ b/vendor/go.etcd.io/bbolt/.gitignore @@ -3,3 +3,5 @@ *.swp /bin/ cover.out +/.idea +*.iml diff --git a/vendor/go.etcd.io/bbolt/.travis.yml b/vendor/go.etcd.io/bbolt/.travis.yml index 257dfdfee..452601e49 100644 --- a/vendor/go.etcd.io/bbolt/.travis.yml +++ b/vendor/go.etcd.io/bbolt/.travis.yml @@ -4,9 +4,10 @@ go_import_path: go.etcd.io/bbolt sudo: false go: -- 1.12 +- 1.15 before_install: +- go get -v golang.org/x/sys/unix - go get -v honnef.co/go/tools/... - go get -v github.com/kisielk/errcheck diff --git a/vendor/go.etcd.io/bbolt/Makefile b/vendor/go.etcd.io/bbolt/Makefile index 2968aaa61..21ecf48f6 100644 --- a/vendor/go.etcd.io/bbolt/Makefile +++ b/vendor/go.etcd.io/bbolt/Makefile @@ -2,8 +2,6 @@ BRANCH=`git rev-parse --abbrev-ref HEAD` COMMIT=`git rev-parse --short HEAD` GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" -default: build - race: @TEST_FREELIST_TYPE=hashmap go test -v -race -test.run="TestSimulate_(100op|1000op)" @echo "array freelist test" diff --git a/vendor/go.etcd.io/bbolt/README.md b/vendor/go.etcd.io/bbolt/README.md index c9e64b1a6..f1b4a7b2b 100644 --- a/vendor/go.etcd.io/bbolt/README.md +++ b/vendor/go.etcd.io/bbolt/README.md @@ -908,12 +908,14 @@ Below is a list of public, open source projects that use Bolt: * [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt. * [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners. 
 * [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files.
+* [BoltDB Viewer](https://github.com/zc310/rich_boltdb) - A BoltDB viewer that runs on Windows, Linux, and Android.
 * [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
 * [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet.
 * [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining simple tx and key scans.
 * [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
 * [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
+* [🌰 Chestnut](https://github.com/jrapoport/chestnut) - Chestnut is encrypted storage for Go.
 * [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
 * [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
 * [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency.
@@ -938,9 +940,8 @@ Below is a list of public, open source projects that use Bolt:
 * [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
 * [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
 * [NATS](https://github.com/nats-io/nats-streaming-server) - NATS Streaming uses bbolt for message and metadata storage.
-* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
-* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
 * [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
+* [Rain](https://github.com/cenkalti/rain) - BitTorrent client and library.
 * [reef-pi](https://github.com/reef-pi/reef-pi) - reef-pi is an award winning, modular, DIY reef tank controller using easy to learn electronics based on a Raspberry Pi.
 * [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service
 * [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
diff --git a/vendor/go.etcd.io/bbolt/bolt_unix.go b/vendor/go.etcd.io/bbolt/bolt_unix.go
index 2938fed58..4e5f65ccc 100644
--- a/vendor/go.etcd.io/bbolt/bolt_unix.go
+++ b/vendor/go.etcd.io/bbolt/bolt_unix.go
@@ -7,6 +7,8 @@ import (
 	"syscall"
 	"time"
 	"unsafe"
+
+	"golang.org/x/sys/unix"
 )
 
 // flock acquires an advisory lock on a file descriptor.
@@ -49,13 +51,13 @@ func funlock(db *DB) error {
 // mmap memory maps a DB's data file.
 func mmap(db *DB, sz int) error {
 	// Map the data file to memory.
-	b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
+	b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
 	if err != nil {
 		return err
 	}
 
 	// Advise the kernel that the mmap is accessed randomly.
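// Editor's aside, a hedged sketch and not part of the patch: unix.Mmap,
// which the hunk above swaps in for syscall.Mmap, keeps the same signature;
// x/sys/unix is maintained while the syscall package is frozen. The path is
// a placeholder for any readable, non-empty file.
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.Open("/etc/hosts") // placeholder path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	fi, err := f.Stat()
	if err != nil {
		panic(err)
	}

	// Same call shape as the bbolt hunk: fd, offset, length, prot, flags.
	b, err := unix.Mmap(int(f.Fd()), 0, int(fi.Size()), unix.PROT_READ, unix.MAP_SHARED)
	if err != nil {
		panic(err)
	}
	defer unix.Munmap(b)

	fmt.Printf("mapped %d bytes\n", len(b))
}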
-	err = madvise(b, syscall.MADV_RANDOM)
+	err = unix.Madvise(b, syscall.MADV_RANDOM)
 	if err != nil && err != syscall.ENOSYS {
 		// Ignore not implemented error in kernel because it still works.
 		return fmt.Errorf("madvise: %s", err)
@@ -76,18 +78,9 @@ func munmap(db *DB) error {
 	}
 
 	// Unmap using the original byte slice.
-	err := syscall.Munmap(db.dataref)
+	err := unix.Munmap(db.dataref)
 	db.dataref = nil
 	db.data = nil
 	db.datasz = 0
 	return err
 }
-
-// NOTE: This function is copied from stdlib because it is not available on darwin.
-func madvise(b []byte, advice int) (err error) {
-	_, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice))
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
diff --git a/vendor/go.etcd.io/bbolt/compact.go b/vendor/go.etcd.io/bbolt/compact.go
new file mode 100644
index 000000000..e4fe91b04
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/compact.go
@@ -0,0 +1,114 @@
+package bbolt
+
+// Compact will create a copy of the source DB in the destination DB. This may
+// reclaim space that the source database no longer has use for. txMaxSize can be
+// used to limit the transaction size of this process and may trigger intermittent
+// commits. A value of zero will ignore transaction sizes.
+// TODO: merge with: https://github.com/etcd-io/etcd/blob/b7f0f52a16dbf83f18ca1d803f7892d750366a94/mvcc/backend/backend.go#L349
+func Compact(dst, src *DB, txMaxSize int64) error {
+	// Commit regularly, or we'll run out of memory for large datasets if using one transaction.
+	var size int64
+	tx, err := dst.Begin(true)
+	if err != nil {
+		return err
+	}
+	defer tx.Rollback()
+
+	if err := walk(src, func(keys [][]byte, k, v []byte, seq uint64) error {
+		// On each key/value, check if we have exceeded tx size.
+		sz := int64(len(k) + len(v))
+		if size+sz > txMaxSize && txMaxSize != 0 {
+			// Commit previous transaction.
+			if err := tx.Commit(); err != nil {
+				return err
+			}
+
+			// Start new transaction.
+			tx, err = dst.Begin(true)
+			if err != nil {
+				return err
+			}
+			size = 0
+		}
+		size += sz
+
+		// Create bucket on the root transaction if this is the first level.
+		nk := len(keys)
+		if nk == 0 {
+			bkt, err := tx.CreateBucket(k)
+			if err != nil {
+				return err
+			}
+			if err := bkt.SetSequence(seq); err != nil {
+				return err
+			}
+			return nil
+		}
+
+		// Create buckets on subsequent levels, if necessary.
+		b := tx.Bucket(keys[0])
+		if nk > 1 {
+			for _, k := range keys[1:] {
+				b = b.Bucket(k)
+			}
+		}
+
+		// Fill the entire page for best compaction.
+		b.FillPercent = 1.0
+
+		// If there is no value then this is a bucket call.
+		if v == nil {
+			bkt, err := b.CreateBucket(k)
+			if err != nil {
+				return err
+			}
+			if err := bkt.SetSequence(seq); err != nil {
+				return err
+			}
+			return nil
+		}
+
+		// Otherwise treat it as a key/value pair.
+		return b.Put(k, v)
+	}); err != nil {
+		return err
+	}
+
+	return tx.Commit()
+}
+
+// walkFunc is the type of the function called for keys (buckets and "normal"
+// values) discovered by Walk. keys is the list of keys to descend to the bucket
+// owning the discovered key/value pair k/v.
+type walkFunc func(keys [][]byte, k, v []byte, seq uint64) error
+
+// walk recursively walks the bolt database db, calling walkFn for each key it finds.
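// Editor's aside, a hedged sketch and not part of the patch: driving the
// Compact helper added above. File names are placeholders; the 64 KiB cap
// triggers the intermittent commits described in Compact's doc comment.
package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	src, err := bolt.Open("data.db", 0600, &bolt.Options{ReadOnly: true})
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()

	dst, err := bolt.Open("data-compacted.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer dst.Close()

	if err := bolt.Compact(dst, src, 1<<16); err != nil {
		log.Fatal(err)
	}
}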
+func walk(db *DB, walkFn walkFunc) error { + return db.View(func(tx *Tx) error { + return tx.ForEach(func(name []byte, b *Bucket) error { + return walkBucket(b, nil, name, nil, b.Sequence(), walkFn) + }) + }) +} + +func walkBucket(b *Bucket, keypath [][]byte, k, v []byte, seq uint64, fn walkFunc) error { + // Execute callback. + if err := fn(keypath, k, v, seq); err != nil { + return err + } + + // If this is not a bucket then stop. + if v != nil { + return nil + } + + // Iterate over each child key/value. + keypath = append(keypath, k) + return b.ForEach(func(k, v []byte) error { + if v == nil { + bkt := b.Bucket(k) + return walkBucket(bkt, keypath, k, nil, bkt.Sequence(), fn) + } + return walkBucket(b, keypath, k, v, b.Sequence(), fn) + }) +} diff --git a/vendor/go.etcd.io/bbolt/db.go b/vendor/go.etcd.io/bbolt/db.go index 80b0095cc..a798c390a 100644 --- a/vendor/go.etcd.io/bbolt/db.go +++ b/vendor/go.etcd.io/bbolt/db.go @@ -120,6 +120,12 @@ type DB struct { // of truncate() and fsync() when growing the data file. AllocSize int + // Mlock locks database file in memory when set to true. + // It prevents major page faults, however used memory can't be reclaimed. + // + // Supported only on Unix via mlock/munlock syscalls. + Mlock bool + path string openFile func(string, int, os.FileMode) (*os.File, error) file *os.File @@ -188,6 +194,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { db.MmapFlags = options.MmapFlags db.NoFreelistSync = options.NoFreelistSync db.FreelistType = options.FreelistType + db.Mlock = options.Mlock // Set default values for later DB operations. db.MaxBatchSize = DefaultMaxBatchSize @@ -337,7 +344,8 @@ func (db *DB) mmap(minsz int) error { } // Ensure the size is at least the minimum size. - var size = int(info.Size()) + fileSize := int(info.Size()) + var size = fileSize if size < minsz { size = minsz } @@ -346,6 +354,13 @@ func (db *DB) mmap(minsz int) error { return err } + if db.Mlock { + // Unlock db memory + if err := db.munlock(fileSize); err != nil { + return err + } + } + // Dereference all mmap references before unmapping. if db.rwtx != nil { db.rwtx.root.dereference() @@ -361,6 +376,13 @@ func (db *DB) mmap(minsz int) error { return err } + if db.Mlock { + // Don't allow swapping of data file + if err := db.mlock(fileSize); err != nil { + return err + } + } + // Save references to the meta pages. db.meta0 = db.page(0).meta() db.meta1 = db.page(1).meta() @@ -422,12 +444,36 @@ func (db *DB) mmapSize(size int) (int, error) { return int(sz), nil } +func (db *DB) munlock(fileSize int) error { + if err := munlock(db, fileSize); err != nil { + return fmt.Errorf("munlock error: " + err.Error()) + } + return nil +} + +func (db *DB) mlock(fileSize int) error { + if err := mlock(db, fileSize); err != nil { + return fmt.Errorf("mlock error: " + err.Error()) + } + return nil +} + +func (db *DB) mrelock(fileSizeFrom, fileSizeTo int) error { + if err := db.munlock(fileSizeFrom); err != nil { + return err + } + if err := db.mlock(fileSizeTo); err != nil { + return err + } + return nil +} + // init creates a new database file and initializes its meta pages. func (db *DB) init() error { // Create two meta pages on a buffer. buf := make([]byte, db.pageSize*4) for i := 0; i < 2; i++ { - p := db.pageInBuffer(buf[:], pgid(i)) + p := db.pageInBuffer(buf, pgid(i)) p.id = pgid(i) p.flags = metaPageFlag @@ -444,13 +490,13 @@ func (db *DB) init() error { } // Write an empty freelist at page 3. 
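// Editor's aside, a hedged sketch and not part of the patch: opting in to
// the new Mlock knob plumbed through Open in the db.go hunk above. On Unix
// the mapped data file is mlock()ed so it cannot be swapped out; Open can
// fail if RLIMIT_MEMLOCK is too low. The file name is a placeholder.
package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("data.db", 0600, &bolt.Options{Mlock: true})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}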
- p := db.pageInBuffer(buf[:], pgid(2)) + p := db.pageInBuffer(buf, pgid(2)) p.id = pgid(2) p.flags = freelistPageFlag p.count = 0 // Write an empty leaf page at page 4. - p = db.pageInBuffer(buf[:], pgid(3)) + p = db.pageInBuffer(buf, pgid(3)) p.id = pgid(3) p.flags = leafPageFlag p.count = 0 @@ -462,6 +508,7 @@ func (db *DB) init() error { if err := fdatasync(db); err != nil { return err } + db.filesz = len(buf) return nil } @@ -973,6 +1020,12 @@ func (db *DB) grow(sz int) error { if err := db.file.Sync(); err != nil { return fmt.Errorf("file sync error: %s", err) } + if db.Mlock { + // unlock old file and lock new one + if err := db.mrelock(db.filesz, sz); err != nil { + return fmt.Errorf("mlock/munlock error: %s", err) + } + } } db.filesz = sz @@ -1064,6 +1117,11 @@ type Options struct { // OpenFile is used to open files. It defaults to os.OpenFile. This option // is useful for writing hermetic tests. OpenFile func(string, int, os.FileMode) (*os.File, error) + + // Mlock locks database file in memory when set to true. + // It prevents potential page faults, however + // used memory can't be reclaimed. (UNIX only) + Mlock bool } // DefaultOptions represent the options used if nil options are passed into Open(). diff --git a/vendor/go.etcd.io/bbolt/freelist_hmap.go b/vendor/go.etcd.io/bbolt/freelist_hmap.go index 02ef2be04..dbd67a1e7 100644 --- a/vendor/go.etcd.io/bbolt/freelist_hmap.go +++ b/vendor/go.etcd.io/bbolt/freelist_hmap.go @@ -4,7 +4,7 @@ import "sort" // hashmapFreeCount returns count of free pages(hashmap version) func (f *freelist) hashmapFreeCount() int { - // use the forwardmap to get the total count + // use the forwardMap to get the total count count := 0 for _, size := range f.forwardMap { count += int(size) @@ -41,7 +41,7 @@ func (f *freelist) hashmapAllocate(txid txid, n int) pgid { for pid := range bm { // remove the initial - f.delSpan(pid, uint64(size)) + f.delSpan(pid, size) f.allocs[pid] = txid @@ -51,7 +51,7 @@ func (f *freelist) hashmapAllocate(txid txid, n int) pgid { f.addSpan(pid+pgid(n), remain) for i := pgid(0); i < pgid(n); i++ { - delete(f.cache, pid+pgid(i)) + delete(f.cache, pid+i) } return pid } diff --git a/vendor/go.etcd.io/bbolt/go.mod b/vendor/go.etcd.io/bbolt/go.mod index c2366daef..96355a69b 100644 --- a/vendor/go.etcd.io/bbolt/go.mod +++ b/vendor/go.etcd.io/bbolt/go.mod @@ -2,4 +2,4 @@ module go.etcd.io/bbolt go 1.12 -require golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 +require golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d diff --git a/vendor/go.etcd.io/bbolt/go.sum b/vendor/go.etcd.io/bbolt/go.sum index 4ad15a488..c13f8f470 100644 --- a/vendor/go.etcd.io/bbolt/go.sum +++ b/vendor/go.etcd.io/bbolt/go.sum @@ -1,2 +1,2 @@ -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 h1:LfCXLvNmTYH9kEmVgqbnsWfruoXZIrh4YBgqVHtDvw0= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d h1:L/IKR6COd7ubZrs2oTnTi73IhgqJ71c9s80WsQnh0Es= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/go.etcd.io/bbolt/mlock_unix.go b/vendor/go.etcd.io/bbolt/mlock_unix.go new file mode 100644 index 000000000..6a6c7b353 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/mlock_unix.go @@ -0,0 +1,36 @@ +// +build !windows + +package bbolt + +import "golang.org/x/sys/unix" + +// mlock locks memory of db file +func mlock(db *DB, fileSize int) error { + sizeToLock := fileSize + if sizeToLock > 
db.datasz {
+		// Can't lock more than mmaped slice
+		sizeToLock = db.datasz
+	}
+	if err := unix.Mlock(db.dataref[:sizeToLock]); err != nil {
+		return err
+	}
+	return nil
+}
+
+// munlock unlocks memory of db file
+func munlock(db *DB, fileSize int) error {
+	if db.dataref == nil {
+		return nil
+	}
+
+	sizeToUnlock := fileSize
+	if sizeToUnlock > db.datasz {
+		// Can't unlock more than mmaped slice
+		sizeToUnlock = db.datasz
+	}
+
+	if err := unix.Munlock(db.dataref[:sizeToUnlock]); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/vendor/go.etcd.io/bbolt/mlock_windows.go b/vendor/go.etcd.io/bbolt/mlock_windows.go
new file mode 100644
index 000000000..b4a36a493
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/mlock_windows.go
@@ -0,0 +1,11 @@
+package bbolt
+
+// mlock locks memory of db file
+func mlock(_ *DB, _ int) error {
+	panic("mlock is supported only on UNIX systems")
+}
+
+// munlock unlocks memory of db file
+func munlock(_ *DB, _ int) error {
+	panic("munlock is supported only on UNIX systems")
+}
diff --git a/vendor/go.etcd.io/bbolt/tx.go b/vendor/go.etcd.io/bbolt/tx.go
index 4b1a64a8b..869d41200 100644
--- a/vendor/go.etcd.io/bbolt/tx.go
+++ b/vendor/go.etcd.io/bbolt/tx.go
@@ -188,7 +188,6 @@ func (tx *Tx) Commit() error {
 	}
 
 	// If strict mode is enabled then perform a consistency check.
-	// Only the first consistency error is reported in the panic.
 	if tx.db.StrictMode {
 		ch := tx.Check()
 		var errs []string
@@ -393,7 +392,7 @@ func (tx *Tx) CopyFile(path string, mode os.FileMode) error {
 		return err
 	}
 
-	err = tx.Copy(f)
+	_, err = tx.WriteTo(f)
 	if err != nil {
 		_ = f.Close()
 		return err
diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go
index f91466f7c..038941d70 100644
--- a/vendor/golang.org/x/net/html/parse.go
+++ b/vendor/golang.org/x/net/html/parse.go
@@ -663,6 +663,24 @@ func inHeadIM(p *parser) bool {
 		// Ignore the token.
 		return true
 	case a.Template:
+		// TODO: remove this divergence from the HTML5 spec.
+		//
+		// We don't handle all of the corner cases when mixing foreign
+		// content (i.e. <svg> or <math>) with
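// Editor's aside, a hedged sketch and not part of the patch: tx.WriteTo,
// which CopyFile now calls directly in the tx.go hunk above, streams a
// consistent snapshot of a live database. File names are placeholders.
package main

import (
	"log"
	"os"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("data.db", 0600, nil) // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// A read-only View transaction pins a consistent snapshot while we copy.
	err = db.View(func(tx *bolt.Tx) error {
		f, err := os.Create("backup.db")
		if err != nil {
			return err
		}
		defer f.Close()
		_, err = tx.WriteTo(f)
		return err
	})
	if err != nil {
		log.Fatal(err)
	}
}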