From e308ba0215baa95be5e177533adc1e15029e0869 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Miloslav=20Trma=C4=8D?=
Date: Wed, 25 Jan 2023 19:22:41 +0100
Subject: [PATCH] Vendor c/image after https://github.com/containers/image/pull/1816
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Also includes unreleased https://github.com/openshift/imagebuilder/pull/246
to work with the updated docker/docker dependency.

And updates some references to newly deprecated docker/docker symbols.

[NO NEW TESTS NEEDED]

Signed-off-by: Miloslav Trmač
---
 go.mod | 18 +-
 go.sum | 39 +-
 pkg/api/handlers/compat/system.go | 7 +-
 pkg/api/handlers/compat/volumes.go | 14 +-
 pkg/api/handlers/swagger/responses.go | 4 +-
 test/apiv2/45-system.at | 2 +-
 .../containerd/containerd/sys/epoll.go | 34 -
 .../containerd/containerd/sys/fds.go | 35 -
 .../containerd/containerd/sys/filesys_unix.go | 32 -
 .../containerd/sys/filesys_windows.go | 345 ----
 .../containerd/containerd/sys/oom_linux.go | 82 -
 .../containerd/sys/oom_unsupported.go | 49 -
 .../containerd/containerd/sys/socket_unix.go | 81 -
 .../containerd/sys/socket_windows.go | 30 -
 .../containerd/sys/userns_deprecated.go | 23 -
 .../containers/image/v5/copy/compression.go | 9 +-
 .../containers/image/v5/copy/copy.go | 70 +-
 .../containers/image/v5/copy/encryption.go | 23 +-
 .../containers/image/v5/copy/manifest.go | 25 +-
 .../containers/image/v5/copy/progress_bars.go | 2 +-
 .../image/v5/directory/directory_src.go | 2 +-
 .../image/v5/directory/directory_transport.go | 2 +-
 .../v5/directory/explicitfilepath/path.go | 2 +-
 .../image/v5/docker/archive/transport.go | 15 +-
 .../containers/image/v5/docker/body_reader.go | 234 +++
 .../v5/docker/daemon/daemon_transport.go | 2 +-
 .../image/v5/docker/docker_client.go | 29 +-
 .../image/v5/docker/docker_image.go | 4 +-
 .../image/v5/docker/docker_image_dest.go | 31 +-
 .../image/v5/docker/docker_image_src.go | 8 +-
 .../image/v5/docker/docker_transport.go | 2 +-
 .../containers/image/v5/docker/errors.go | 2 +-
 .../v5/docker/internal/tarfile/writer.go | 29 +-
 .../image/v5/docker/reference/normalize.go | 2 +-
 .../image/v5/docker/reference/reference.go | 2 +-
 .../image/v5/docker/reference/regexp.go | 3 +-
 .../image/v5/docker/wwwauthenticate.go | 2 +-
 .../image/v5/internal/image/docker_list.go | 2 +-
 .../image/v5/internal/image/docker_schema2.go | 2 +-
 .../image/v5/internal/image/oci_index.go | 2 +-
 .../image/v5/internal/manifest/common.go | 72 +
 .../v5/internal/manifest/docker_schema2.go | 15 +
 .../internal/manifest/docker_schema2_list.go | 255 +++
 .../image/v5/internal/manifest/list.go | 86 +
 .../image/v5/internal/manifest/manifest.go | 167 ++
 .../image/v5/internal/manifest/oci_index.go | 265 +++
 .../internal/pkg/platform/platform_matcher.go | 11 +-
 .../containers/image/v5/internal/set/set.go | 46 +
 .../image/v5/internal/signature/signature.go | 13 +-
 .../image/v5/internal/signature/sigstore.go | 23 +-
 .../image/v5/internal/signature/simple.go | 8 +-
 .../v5/internal/uploadreader/upload_reader.go | 2 +-
 .../containers/image/v5/manifest/common.go | 91 -
 .../image/v5/manifest/docker_schema1.go | 23 +-
 .../image/v5/manifest/docker_schema2.go | 16 +-
 .../image/v5/manifest/docker_schema2_list.go | 204 +-
 .../containers/image/v5/manifest/list.go | 55 +-
 .../containers/image/v5/manifest/manifest.go | 141 +-
 .../containers/image/v5/manifest/oci.go | 18 +-
 .../containers/image/v5/manifest/oci_index.go | 217 +-
 .../image/v5/oci/internal/oci_util.go | 8 +-
 .../image/v5/oci/layout/oci_dest.go | 2 +-
.../containers/image/v5/oci/layout/oci_src.go | 2 +- .../image/v5/oci/layout/oci_transport.go | 2 +- .../image/v5/openshift/openshift-copies.go | 33 +- .../image/v5/openshift/openshift.go | 6 +- .../image/v5/openshift/openshift_dest.go | 17 +- .../image/v5/openshift/openshift_transport.go | 2 +- .../image/v5/ostree/ostree_transport.go | 17 +- .../containers/image/v5/pkg/blobcache/src.go | 6 +- .../v5/pkg/blobinfocache/memory/memory.go | 22 +- .../image/v5/pkg/compression/compression.go | 2 +- .../image/v5/pkg/docker/config/config.go | 44 +- .../v5/pkg/sysregistriesv2/shortnames.go | 5 +- .../sysregistriesv2/system_registries_v2.go | 8 +- .../v5/pkg/tlsclientconfig/tlsclientconfig.go | 10 +- .../containers/image/v5/sif/transport.go | 2 +- .../image/v5/signature/fulcio_cert.go | 13 +- .../image/v5/signature/internal/json.go | 20 +- .../image/v5/signature/internal/rekor_set.go | 12 +- .../v5/signature/internal/sigstore_payload.go | 18 +- .../image/v5/signature/policy_config.go | 26 +- .../v5/signature/policy_config_sigstore.go | 4 +- .../image/v5/signature/policy_eval.go | 2 +- .../v5/signature/policy_eval_signedby.go | 7 +- .../image/v5/signature/sigstore/copied.go | 4 +- .../sigstore/rekor/leveled_logger.go | 10 +- .../containers/image/v5/signature/simple.go | 18 +- .../image/v5/storage/storage_dest.go | 16 +- .../image/v5/storage/storage_reference.go | 31 +- .../image/v5/storage/storage_src.go | 2 +- .../image/v5/storage/storage_transport.go | 23 +- .../image/v5/tarball/tarball_reference.go | 8 +- .../image/v5/tarball/tarball_src.go | 37 +- .../transports/alltransports/alltransports.go | 16 +- .../image/v5/transports/transports.go | 10 +- .../containers/image/v5/types/types.go | 8 +- vendor/github.com/docker/docker/AUTHORS | 283 ++- vendor/github.com/docker/docker/api/common.go | 2 +- .../github.com/docker/docker/api/swagger.yaml | 1748 ++++++++++++----- .../docker/docker/api/types/client.go | 34 +- .../docker/docker/api/types/configs.go | 1 + .../docker/api/types/container/config.go | 27 + .../api/types/container/container_create.go | 20 - .../api/types/container/container_wait.go | 28 - .../api/types/container/create_response.go | 19 + .../docker/api/types/container/deprecated.go | 16 + .../docker/api/types/container/host_config.go | 92 +- .../api/types/container/wait_exit_error.go | 12 + .../api/types/container/wait_response.go | 18 + .../docker/docker/api/types/deprecated.go | 14 + .../docker/docker/api/types/events/events.go | 47 +- .../docker/docker/api/types/filters/parse.go | 12 +- .../docker/api/types/graph_driver_data.go | 12 +- .../docker/docker/api/types/image_summary.go | 68 +- .../docker/docker/api/types/mount/mount.go | 19 +- .../docker/api/types/registry/registry.go | 49 +- .../docker/docker/api/types/swarm/common.go | 10 +- .../docker/docker/api/types/swarm/node.go | 24 + .../docker/docker/api/types/swarm/swarm.go | 10 + .../docker/docker/api/types/swarm/task.go | 19 + .../docker/api/types/time/duration_convert.go | 12 - .../docker/docker/api/types/time/timestamp.go | 6 +- .../docker/docker/api/types/types.go | 260 ++- .../docker/api/types/versions/compare.go | 3 + .../docker/api/types/volume/cluster_volume.go | 420 ++++ .../docker/api/types/volume/create_options.go | 29 + .../docker/api/types/volume/deprecated.go | 11 + .../docker/api/types/volume/list_response.go | 18 + .../docker/docker/api/types/volume/options.go | 8 + .../docker/api/types/{ => volume}/volume.go | 13 +- .../docker/api/types/volume/volume_create.go | 31 - 
.../docker/api/types/volume/volume_list.go | 23 - .../docker/api/types/volume/volume_update.go | 7 + .../docker/docker/client/build_cancel.go | 2 +- .../docker/docker/client/checkpoint_list.go | 2 +- .../github.com/docker/docker/client/client.go | 125 +- .../docker/docker/client/client_unix.go | 3 +- .../docker/docker/client/client_windows.go | 3 +- .../docker/docker/client/config_create.go | 2 +- .../docker/docker/client/config_inspect.go | 6 +- .../docker/docker/client/config_remove.go | 4 +- .../docker/docker/client/config_update.go | 5 +- .../docker/docker/client/container_attach.go | 6 +- .../docker/docker/client/container_commit.go | 2 +- .../docker/docker/client/container_copy.go | 18 +- .../docker/docker/client/container_create.go | 27 +- .../docker/docker/client/container_exec.go | 14 +- .../docker/docker/client/container_inspect.go | 8 +- .../docker/docker/client/container_kill.go | 4 +- .../docker/docker/client/container_list.go | 2 +- .../docker/docker/client/container_logs.go | 4 +- .../docker/docker/client/container_remove.go | 2 +- .../docker/docker/client/container_restart.go | 16 +- .../docker/docker/client/container_stop.go | 14 +- .../docker/docker/client/container_update.go | 2 +- .../docker/docker/client/container_wait.go | 39 +- .../docker/docker/client/disk_usage.go | 19 +- .../docker/client/distribution_inspect.go | 2 +- .../docker/docker/client/envvars.go | 90 + .../github.com/docker/docker/client/errors.go | 59 +- .../github.com/docker/docker/client/events.go | 1 - .../github.com/docker/docker/client/hijack.go | 26 +- .../docker/docker/client/image_build.go | 4 +- .../docker/docker/client/image_create.go | 2 +- .../docker/docker/client/image_import.go | 2 +- .../docker/docker/client/image_inspect.go | 6 +- .../docker/docker/client/image_list.go | 3 + .../docker/docker/client/image_remove.go | 2 +- .../docker/docker/client/image_search.go | 8 +- .../docker/docker/client/interface.go | 34 +- .../docker/docker/client/network_inspect.go | 6 +- .../docker/docker/client/network_remove.go | 2 +- .../docker/docker/client/node_inspect.go | 6 +- .../docker/docker/client/node_remove.go | 2 +- .../docker/docker/client/node_update.go | 3 +- .../docker/docker/client/options.go | 112 +- .../github.com/docker/docker/client/ping.go | 9 + .../docker/docker/client/plugin_inspect.go | 6 +- .../docker/docker/client/plugin_list.go | 2 +- .../docker/docker/client/plugin_remove.go | 2 +- .../docker/docker/client/request.go | 36 +- .../docker/docker/client/secret_create.go | 2 +- .../docker/docker/client/secret_inspect.go | 6 +- .../docker/docker/client/secret_remove.go | 4 +- .../docker/docker/client/secret_update.go | 5 +- .../docker/docker/client/service_create.go | 4 +- .../docker/docker/client/service_inspect.go | 6 +- .../docker/docker/client/service_remove.go | 2 +- .../docker/docker/client/service_update.go | 3 +- .../docker/docker/client/swarm_update.go | 9 +- .../docker/docker/client/task_inspect.go | 8 +- .../docker/docker/client/volume_create.go | 13 +- .../docker/docker/client/volume_inspect.go | 26 +- .../docker/docker/client/volume_list.go | 6 +- .../docker/docker/client/volume_remove.go | 2 +- .../docker/docker/client/volume_update.go | 24 + .../docker/docker/errdefs/http_helpers.go | 9 +- .../docker/docker/pkg/archive/archive.go | 316 ++- .../docker/pkg/archive/archive_linux.go | 2 +- .../docker/docker/pkg/archive/archive_unix.go | 33 +- .../docker/docker/pkg/archive/changes.go | 9 +- .../docker/docker/pkg/archive/copy.go | 11 +- 
.../docker/docker/pkg/archive/diff.go | 36 +- .../docker/docker/pkg/archive/diff_unix.go | 22 + .../docker/docker/pkg/archive/diff_windows.go | 6 + .../docker/docker/pkg/archive/wrap.go | 4 +- .../docker/docker/pkg/fileutils/deprecated.go | 44 + .../docker/docker/pkg/fileutils/fileutils.go | 224 --- .../docker/pkg/fileutils/fileutils_unix.go | 3 +- .../docker/pkg/homedir/homedir_linux.go | 9 + .../docker/pkg/homedir/homedir_others.go | 5 + .../docker/docker/pkg/idtools/idtools.go | 54 +- .../docker/docker/pkg/idtools/idtools_unix.go | 45 +- .../docker/pkg/idtools/idtools_windows.go | 9 + .../docker/pkg/idtools/usergroupadd_linux.go | 1 - .../docker/docker/pkg/ioutils/bytespipe.go | 30 +- .../docker/docker/pkg/ioutils/fswriters.go | 5 +- .../docker/docker/pkg/ioutils/readers.go | 16 +- .../docker/docker/pkg/ioutils/temp_unix.go | 6 +- .../docker/docker/pkg/ioutils/temp_windows.go | 6 +- .../pkg/namesgenerator/names-generator.go | 49 +- .../docker/docker/pkg/parsers/parsers.go | 30 +- .../docker/docker/pkg/system/exitcode.go | 19 - .../docker/docker/pkg/system/filesys.go | 19 + .../docker/pkg/system/filesys_deprecated.go | 35 + .../docker/docker/pkg/system/filesys_unix.go | 53 +- .../docker/pkg/system/filesys_windows.go | 174 -- .../docker/docker/pkg/system/image_os.go | 10 + .../docker/docker/pkg/system/init_windows.go | 21 +- .../docker/docker/pkg/system/lcow.go | 49 - .../docker/pkg/system/lcow_unsupported.go | 29 - .../docker/docker/pkg/system/meminfo_linux.go | 6 +- .../docker/pkg/system/meminfo_windows.go | 2 +- .../docker/docker/pkg/system/path.go | 33 +- .../docker/docker/pkg/system/path_unix.go | 6 + .../docker/docker/pkg/system/path_windows.go | 23 +- .../docker/docker/pkg/system/process_unix.go | 5 +- .../github.com/docker/docker/pkg/system/rm.go | 79 - .../docker/docker/pkg/system/rm_windows.go | 6 - .../docker/docker/pkg/system/stat_windows.go | 4 +- .../docker/docker/pkg/system/syscall_unix.go | 12 - .../docker/pkg/system/syscall_windows.go | 136 -- .../docker/docker/pkg/system/umask.go | 14 - .../docker/docker/pkg/system/umask_windows.go | 7 - vendor/github.com/moby/patternmatcher/LICENSE | 191 ++ vendor/github.com/moby/patternmatcher/NOTICE | 16 + .../moby/patternmatcher/patternmatcher.go | 474 +++++ vendor/github.com/moby/sys/mount/doc.go | 4 - vendor/github.com/moby/sys/mount/flags_bsd.go | 46 - .../github.com/moby/sys/mount/flags_linux.go | 87 - .../github.com/moby/sys/mount/flags_unix.go | 140 -- .../github.com/moby/sys/mount/mount_errors.go | 47 - .../github.com/moby/sys/mount/mount_unix.go | 88 - .../moby/sys/mount/mounter_freebsd.go | 62 - .../moby/sys/mount/mounter_linux.go | 72 - .../moby/sys/mount/mounter_openbsd.go | 78 - .../moby/sys/mount/mounter_unsupported.go | 8 - .../moby/sys/mount/sharedsubtree_linux.go | 73 - .../moby/sys/{mount => sequential}/LICENSE | 0 vendor/github.com/moby/sys/sequential/doc.go | 15 + .../moby/sys/sequential/sequential_unix.go | 45 + .../moby/sys/sequential/sequential_windows.go | 161 ++ vendor/github.com/moby/term/tc.go | 1 + vendor/github.com/moby/term/term.go | 7 +- vendor/github.com/moby/term/term_windows.go | 4 - vendor/github.com/moby/term/termios.go | 1 + vendor/github.com/moby/term/termios_bsd.go | 1 + vendor/github.com/moby/term/termios_nonbsd.go | 3 +- .../moby/term/windows/ansi_reader.go | 2 +- .../moby/term/windows/ansi_writer.go | 1 + .../github.com/moby/term/windows/console.go | 1 + 
vendor/github.com/moby/term/winsize.go | 1 + .../selinux/go-selinux/label/label.go | 22 +- .../selinux/go-selinux/rchcon.go | 34 - .../selinux/go-selinux/rchcon_go115.go | 22 - .../selinux/go-selinux/selinux.go | 30 +- .../selinux/go-selinux/selinux_linux.go | 185 +- .../selinux/go-selinux/selinux_stub.go | 44 +- .../opencontainers/selinux/pkg/pwalk/pwalk.go | 10 +- .../selinux/pkg/pwalkdir/pwalkdir.go | 2 +- .../openshift/imagebuilder/dispatchers.go | 8 +- .../imagebuilder/dockerfile/parser/parser.go | 2 +- vendor/golang.org/x/exp/LICENSE | 27 + vendor/golang.org/x/exp/PATENTS | 22 + .../x/exp/constraints/constraints.go | 50 + vendor/golang.org/x/exp/maps/maps.go | 94 + vendor/golang.org/x/exp/slices/slices.go | 258 +++ vendor/golang.org/x/exp/slices/sort.go | 126 ++ vendor/golang.org/x/exp/slices/zsortfunc.go | 479 +++++ .../golang.org/x/exp/slices/zsortordered.go | 481 +++++ vendor/modules.txt | 38 +- 292 files changed, 7976 insertions(+), 5114 deletions(-) delete mode 100644 vendor/github.com/containerd/containerd/sys/epoll.go delete mode 100644 vendor/github.com/containerd/containerd/sys/fds.go delete mode 100644 vendor/github.com/containerd/containerd/sys/filesys_unix.go delete mode 100644 vendor/github.com/containerd/containerd/sys/filesys_windows.go delete mode 100644 vendor/github.com/containerd/containerd/sys/oom_linux.go delete mode 100644 vendor/github.com/containerd/containerd/sys/oom_unsupported.go delete mode 100644 vendor/github.com/containerd/containerd/sys/socket_unix.go delete mode 100644 vendor/github.com/containerd/containerd/sys/socket_windows.go delete mode 100644 vendor/github.com/containerd/containerd/sys/userns_deprecated.go create mode 100644 vendor/github.com/containers/image/v5/docker/body_reader.go create mode 100644 vendor/github.com/containers/image/v5/internal/manifest/common.go create mode 100644 vendor/github.com/containers/image/v5/internal/manifest/docker_schema2.go create mode 100644 vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go create mode 100644 vendor/github.com/containers/image/v5/internal/manifest/list.go create mode 100644 vendor/github.com/containers/image/v5/internal/manifest/manifest.go create mode 100644 vendor/github.com/containers/image/v5/internal/manifest/oci_index.go create mode 100644 vendor/github.com/containers/image/v5/internal/set/set.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/container_create.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/container_wait.go create mode 100644 vendor/github.com/docker/docker/api/types/container/create_response.go create mode 100644 vendor/github.com/docker/docker/api/types/container/deprecated.go create mode 100644 vendor/github.com/docker/docker/api/types/container/wait_exit_error.go create mode 100644 vendor/github.com/docker/docker/api/types/container/wait_response.go create mode 100644 vendor/github.com/docker/docker/api/types/deprecated.go delete mode 100644 vendor/github.com/docker/docker/api/types/time/duration_convert.go create mode 100644 vendor/github.com/docker/docker/api/types/volume/cluster_volume.go create mode 100644 vendor/github.com/docker/docker/api/types/volume/create_options.go create mode 100644 vendor/github.com/docker/docker/api/types/volume/deprecated.go 
create mode 100644 vendor/github.com/docker/docker/api/types/volume/list_response.go create mode 100644 vendor/github.com/docker/docker/api/types/volume/options.go rename vendor/github.com/docker/docker/api/types/{ => volume}/volume.go (87%) delete mode 100644 vendor/github.com/docker/docker/api/types/volume/volume_create.go delete mode 100644 vendor/github.com/docker/docker/api/types/volume/volume_list.go create mode 100644 vendor/github.com/docker/docker/api/types/volume/volume_update.go create mode 100644 vendor/github.com/docker/docker/client/envvars.go create mode 100644 vendor/github.com/docker/docker/client/volume_update.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/diff_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/diff_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/fileutils/deprecated.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/exitcode.go create mode 100644 vendor/github.com/docker/docker/pkg/system/filesys.go create mode 100644 vendor/github.com/docker/docker/pkg/system/filesys_deprecated.go create mode 100644 vendor/github.com/docker/docker/pkg/system/image_os.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/lcow.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/lcow_unsupported.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/rm.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/rm_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/syscall_unix.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/syscall_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/umask.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/umask_windows.go create mode 100644 vendor/github.com/moby/patternmatcher/LICENSE create mode 100644 vendor/github.com/moby/patternmatcher/NOTICE create mode 100644 vendor/github.com/moby/patternmatcher/patternmatcher.go delete mode 100644 vendor/github.com/moby/sys/mount/doc.go delete mode 100644 vendor/github.com/moby/sys/mount/flags_bsd.go delete mode 100644 vendor/github.com/moby/sys/mount/flags_linux.go delete mode 100644 vendor/github.com/moby/sys/mount/flags_unix.go delete mode 100644 vendor/github.com/moby/sys/mount/mount_errors.go delete mode 100644 vendor/github.com/moby/sys/mount/mount_unix.go delete mode 100644 vendor/github.com/moby/sys/mount/mounter_freebsd.go delete mode 100644 vendor/github.com/moby/sys/mount/mounter_linux.go delete mode 100644 vendor/github.com/moby/sys/mount/mounter_openbsd.go delete mode 100644 vendor/github.com/moby/sys/mount/mounter_unsupported.go delete mode 100644 vendor/github.com/moby/sys/mount/sharedsubtree_linux.go rename vendor/github.com/moby/sys/{mount => sequential}/LICENSE (100%) create mode 100644 vendor/github.com/moby/sys/sequential/doc.go create mode 100644 vendor/github.com/moby/sys/sequential/sequential_unix.go create mode 100644 vendor/github.com/moby/sys/sequential/sequential_windows.go delete mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/rchcon.go delete mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/rchcon_go115.go create mode 
100644 vendor/golang.org/x/exp/LICENSE create mode 100644 vendor/golang.org/x/exp/PATENTS create mode 100644 vendor/golang.org/x/exp/constraints/constraints.go create mode 100644 vendor/golang.org/x/exp/maps/maps.go create mode 100644 vendor/golang.org/x/exp/slices/slices.go create mode 100644 vendor/golang.org/x/exp/slices/sort.go create mode 100644 vendor/golang.org/x/exp/slices/zsortfunc.go create mode 100644 vendor/golang.org/x/exp/slices/zsortordered.go diff --git a/go.mod b/go.mod index 372710c1e6..83ee8f4d29 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( github.com/containers/buildah v1.29.1-0.20230201192322-e56eb25575c7 github.com/containers/common v0.51.0 github.com/containers/conmon v2.0.20+incompatible - github.com/containers/image/v5 v5.24.1-0.20230202144111-a49c94a010be + github.com/containers/image/v5 v5.24.1-0.20230208161124-34839152eb48 github.com/containers/ocicrypt v1.1.7 github.com/containers/psgo v1.8.0 github.com/containers/storage v1.45.3 @@ -22,7 +22,7 @@ require ( github.com/coreos/stream-metadata-go v0.0.0-20210225230131-70edb9eb47b3 github.com/cyphar/filepath-securejoin v0.2.3 github.com/digitalocean/go-qemu v0.0.0-20210326154740-ac9e0b687001 - github.com/docker/docker v20.10.23+incompatible + github.com/docker/docker v23.0.0+incompatible github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 github.com/docker/go-plugins-helpers v0.0.0-20211224144127-6eecb7beb651 github.com/docker/go-units v0.5.0 @@ -38,7 +38,7 @@ require ( github.com/hashicorp/go-multierror v1.1.1 github.com/json-iterator/go v1.1.12 github.com/mattn/go-shellwords v1.0.12 - github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 + github.com/moby/term v0.0.0-20221120202655-abb19827d345 github.com/nxadm/tail v1.4.8 github.com/onsi/ginkgo v1.16.5 github.com/onsi/gomega v1.26.0 @@ -47,8 +47,8 @@ require ( github.com/opencontainers/runc v1.1.4 github.com/opencontainers/runtime-spec v1.0.3-0.20220825212826-86290f6a00fb github.com/opencontainers/runtime-tools v0.9.1-0.20221014010322-58c91d646d86 - github.com/opencontainers/selinux v1.10.2 - github.com/openshift/imagebuilder v1.2.4-0.20220711175835-4151e43600df + github.com/opencontainers/selinux v1.11.0 + github.com/openshift/imagebuilder v1.2.4-0.20230207193036-6e08c897da73 github.com/rootless-containers/rootlesskit v1.1.0 github.com/sirupsen/logrus v1.9.0 github.com/spf13/cobra v1.6.1 @@ -112,7 +112,7 @@ require ( github.com/google/go-cmp v0.5.9 // indirect github.com/google/go-containerregistry v0.12.1 // indirect github.com/google/go-intervals v0.0.2 // indirect - github.com/google/trillian v1.5.0 // indirect + github.com/google/trillian v1.5.1-0.20220819043421-0a389c4bb8d9 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-retryablehttp v0.7.2 // indirect @@ -131,8 +131,9 @@ require ( github.com/miekg/pkcs11 v1.1.1 // indirect github.com/mistifyio/go-zfs/v3 v3.0.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect - github.com/moby/sys/mount v0.3.3 // indirect + github.com/moby/patternmatcher v0.5.0 // indirect github.com/moby/sys/mountinfo 
v0.6.2 // indirect + github.com/moby/sys/sequential v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/morikuni/aec v1.0.0 // indirect @@ -151,7 +152,7 @@ require ( github.com/sigstore/sigstore v1.5.1 // indirect github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect - github.com/sylabs/sif/v2 v2.9.0 // indirect + github.com/sylabs/sif/v2 v2.9.1 // indirect github.com/tchap/go-patricia v2.3.0+incompatible // indirect github.com/theupdateframework/go-tuf v0.5.2 // indirect github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect @@ -164,6 +165,7 @@ require ( go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect go.opencensus.io v0.24.0 // indirect golang.org/x/crypto v0.5.0 // indirect + golang.org/x/exp v0.0.0-20230206171751-46f607a40771 // indirect golang.org/x/mod v0.7.0 // indirect golang.org/x/oauth2 v0.4.0 // indirect golang.org/x/tools v0.4.0 // indirect diff --git a/go.sum b/go.sum index c2447f4931..f8e0d5df00 100644 --- a/go.sum +++ b/go.sum @@ -265,8 +265,8 @@ github.com/containers/common v0.51.0 h1:Ax4YHNTG8cEPHZJcMYRoP7sfBgOISceeyOvmZzmS github.com/containers/common v0.51.0/go.mod h1:3W2WIdalgQfrsX/T5tjX+6CxgT3ThJVN2G9sNuFjuCM= github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg= github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I= -github.com/containers/image/v5 v5.24.1-0.20230202144111-a49c94a010be h1:Bcsn9ohcVpCM9D2kZvIVMrO0QmmP87jhVZh/r9WrQ18= -github.com/containers/image/v5 v5.24.1-0.20230202144111-a49c94a010be/go.mod h1:7Aonfvk5Wz0DLP40xgVSOGvygK3JXvDLxgotyGp4/KA= +github.com/containers/image/v5 v5.24.1-0.20230208161124-34839152eb48 h1:etc/YwCWeT6bfyxfKOaRnFRz2KouV53yMIi3IamUtI8= +github.com/containers/image/v5 v5.24.1-0.20230208161124-34839152eb48/go.mod h1:dZdaN/Ump5vD4SKQcu599upFIK1tutPuLrmk0Z1B7Yw= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= @@ -338,8 +338,8 @@ github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6 github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.12+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.23+incompatible h1:1ZQUUYAdh+oylOT85aA2ZcfRp22jmLhoaEcVEfK8dyA= -github.com/docker/docker v20.10.23+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v23.0.0+incompatible h1:L6c28tNyqZ4/ub9AZC9d5QUuunoHHfEH4/Ue+h/E5nE= +github.com/docker/docker v23.0.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers 
v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A= github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0= @@ -509,7 +509,6 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -589,8 +588,8 @@ github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLe github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= -github.com/google/trillian v1.5.0 h1:I5pIN18bKlXtlj1Tk919rQ3mWBU2BzNNR6JhLISGMB4= -github.com/google/trillian v1.5.0/go.mod h1:2/gAIc+G1MUcErOPc+cSwHAQHZlGy+RYHjVGnhUQ3e8= +github.com/google/trillian v1.5.1-0.20220819043421-0a389c4bb8d9 h1:GFmzYtwUMi1S2mjLxfrJ/CZ9gWDG+zeLtZByg/QEBkk= +github.com/google/trillian v1.5.1-0.20220819043421-0a389c4bb8d9/go.mod h1:vywkS3p2SgNmPL7oAWqU5PiiknzRMp+ol3a19jfY2PQ= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -777,17 +776,20 @@ github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= github.com/mndrix/tap-go v0.0.0-20171203230836-629fa407e90b/go.mod h1:pzzDgJWZ34fGzaAZGFW22KVZDfyrYW+QABMrWnJBnSs= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= +github.com/moby/patternmatcher v0.5.0 h1:YCZgJOeULcxLw1Q+sVR636pmS7sPEn1Qo2iAN6M7DBo= +github.com/moby/patternmatcher v0.5.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM= -github.com/moby/sys/mount v0.3.3 h1:fX1SVkXFJ47XWDoeFW4Sq7PdQJnV2QIDZAqjNqgEjUs= -github.com/moby/sys/mount v0.3.3/go.mod h1:PBaEorSNTLG5t/+4EgukEQVlAvVEc6ZjTySwKdqp5K0= github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= 
github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= +github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= +github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= -github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= +github.com/moby/term v0.0.0-20221120202655-abb19827d345 h1:J9c53/kxIH+2nTKBEfZYFMlhghtHpIHSXpm5VRGHSnU= +github.com/moby/term v0.0.0-20221120202655-abb19827d345/go.mod h1:15ce4BGCFxt7I5NQKT+HV0yEDxmf6fSysfEDiVo3zFM= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -866,10 +868,11 @@ github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3 github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= github.com/opencontainers/selinux v1.9.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/opencontainers/selinux v1.10.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= -github.com/opencontainers/selinux v1.10.2 h1:NFy2xCsjn7+WspbfZkUd5zyVeisV7VFbPSP96+8/ha4= github.com/opencontainers/selinux v1.10.2/go.mod h1:cARutUbaUrlRClyvxOICCgKixCs6L05aUsohzA3EkHQ= -github.com/openshift/imagebuilder v1.2.4-0.20220711175835-4151e43600df h1:vf6pdI10F2Tim5a9JKiVVl4/dpNz1OEhz4EnfLdLtiA= -github.com/openshift/imagebuilder v1.2.4-0.20220711175835-4151e43600df/go.mod h1:TRYHe4CH9U6nkDjxjBNM5klrLbJBrRbpJE5SaRwUBsQ= +github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU= +github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= +github.com/openshift/imagebuilder v1.2.4-0.20230207193036-6e08c897da73 h1:c4l+zY30Hl63ZFj+bHw6TeBBb9ok/uz7wJJoSaYR0kg= +github.com/openshift/imagebuilder v1.2.4-0.20230207193036-6e08c897da73/go.mod h1:TRYHe4CH9U6nkDjxjBNM5klrLbJBrRbpJE5SaRwUBsQ= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M= @@ -901,7 +904,7 @@ github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDf github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.13.0 
h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU= +github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -1022,8 +1025,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/sylabs/sif/v2 v2.9.0 h1:q9K92j1QW4/QLOtKh9YZpJHrXav6x15AVhQGPVLcg+4= -github.com/sylabs/sif/v2 v2.9.0/go.mod h1:bRdFzcqif0eDjwx0isG4cgTFoKTQn/vfBXVSoP2rB2Y= +github.com/sylabs/sif/v2 v2.9.1 h1:LxF9EcH4hmwSqDBdRv9Tt57YVkvV9rDu66AA/nmns2Y= +github.com/sylabs/sif/v2 v2.9.1/go.mod h1:10lbqUw/uptKH4Z6dRDZl+9Iz7jMiFMDE99eHRJDwOs= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= @@ -1167,6 +1170,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20230206171751-46f607a40771 h1:xP7rWLUr1e1n2xkK5YB4LI0hPEy3LJC6Wk+D4pGlOJg= +golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1689,10 +1694,12 @@ k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= k8s.io/kube-openapi 
v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= diff --git a/pkg/api/handlers/compat/system.go b/pkg/api/handlers/compat/system.go index 23f116d167..64f1846433 100644 --- a/pkg/api/handlers/compat/system.go +++ b/pkg/api/handlers/compat/system.go @@ -11,6 +11,7 @@ import ( "github.com/containers/podman/v4/pkg/domain/entities" "github.com/containers/podman/v4/pkg/domain/infra/abi" docker "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/volume" ) func GetDiskUsage(w http.ResponseWriter, r *http.Request) { @@ -64,9 +65,9 @@ func GetDiskUsage(w http.ResponseWriter, r *http.Request) { ctnrs[i] = &t } - vols := make([]*docker.Volume, len(df.Volumes)) + vols := make([]*volume.Volume, len(df.Volumes)) for i, o := range df.Volumes { - t := docker.Volume{ + t := volume.Volume{ CreatedAt: "", Driver: "", Labels: map[string]string{}, @@ -75,7 +76,7 @@ func GetDiskUsage(w http.ResponseWriter, r *http.Request) { Options: nil, Scope: "local", Status: nil, - UsageData: &docker.VolumeUsageData{ + UsageData: &volume.UsageData{ RefCount: int64(o.Links), Size: o.Size, }, diff --git a/pkg/api/handlers/compat/volumes.go b/pkg/api/handlers/compat/volumes.go index fe6ae36aa7..b31a1bb751 100644 --- a/pkg/api/handlers/compat/volumes.go +++ b/pkg/api/handlers/compat/volumes.go @@ -52,14 +52,14 @@ func ListVolumes(w http.ResponseWriter, r *http.Request) { utils.InternalServerError(w, err) return } - volumeConfigs := make([]*docker_api_types.Volume, 0, len(vols)) + volumeConfigs := make([]*docker_api_types_volume.Volume, 0, len(vols)) for _, v := range vols { mp, err := v.MountPoint() if err != nil { utils.InternalServerError(w, err) return } - config := docker_api_types.Volume{ + config := docker_api_types_volume.Volume{ Name: v.Name(), Driver: v.Driver(), Mountpoint: mp, @@ -70,7 +70,7 @@ func ListVolumes(w http.ResponseWriter, r *http.Request) { } volumeConfigs = append(volumeConfigs, &config) } - response := docker_api_types_volume.VolumeListOKBody{ + response := docker_api_types_volume.ListResponse{ Volumes: volumeConfigs, Warnings: []string{}, } @@ -91,7 +91,7 @@ func CreateVolume(w http.ResponseWriter, r *http.Request) { return } // decode params from body - input := docker_api_types_volume.VolumeCreateBody{} + input := docker_api_types_volume.CreateOptions{} if err := json.NewDecoder(r.Body).Decode(&input); err != nil { utils.Error(w, http.StatusInternalServerError, fmt.Errorf("Decode(): %w", err)) return @@ -118,7 +118,7 @@ func CreateVolume(w http.ResponseWriter, r *http.Request) { utils.InternalServerError(w, err) return } - response := docker_api_types.Volume{ + response := docker_api_types_volume.Volume{ CreatedAt: existingVolume.CreatedTime().Format(time.RFC3339), Driver: existingVolume.Driver(), Labels: existingVolume.Labels(), @@ -163,7 +163,7 @@ func CreateVolume(w http.ResponseWriter, r *http.Request) { utils.InternalServerError(w, err) return } - volResponse := docker_api_types.Volume{ + volResponse := docker_api_types_volume.Volume{ Name: config.Name, Driver: config.Driver, Mountpoint: mp, @@ -193,7 +193,7 @@ func InspectVolume(w http.ResponseWriter, r *http.Request) { utils.InternalServerError(w, err) return } - volResponse := docker_api_types.Volume{ + volResponse := docker_api_types_volume.Volume{ Name: 
vol.Name(), Driver: vol.Driver(), Mountpoint: mp, diff --git a/pkg/api/handlers/swagger/responses.go b/pkg/api/handlers/swagger/responses.go index d97bde6dd8..a1175fec9c 100644 --- a/pkg/api/handlers/swagger/responses.go +++ b/pkg/api/handlers/swagger/responses.go @@ -240,7 +240,7 @@ type containersList struct { // swagger:response type volumeInspect struct { // in:body - Body dockerAPI.Volume + Body dockerVolume.Volume } // Volume prune @@ -254,7 +254,7 @@ type volumePruneResponse struct { // swagger:response type volumeList struct { // in:body - Body dockerVolume.VolumeListOKBody + Body dockerVolume.ListResponse } // Volume list diff --git a/test/apiv2/45-system.at b/test/apiv2/45-system.at index 04e7435374..42a2f146f9 100644 --- a/test/apiv2/45-system.at +++ b/test/apiv2/45-system.at @@ -7,7 +7,7 @@ t POST 'libpod/system/prune?volumes=true&all=true' params='' 200 ## podman system df -t GET system/df 200 '{"LayersSize":0,"Images":[],"Containers":[],"Volumes":[],"BuildCache":[],"BuilderSize":0}' +t GET system/df 200 '{"LayersSize":0,"Images":[],"Containers":[],"Volumes":[],"BuildCache":[]}' t GET libpod/system/df 200 '{"ImagesSize":0,"Images":[],"Containers":[],"Volumes":[]}' # Create volume. We expect df to report this volume next invocation of system/df diff --git a/vendor/github.com/containerd/containerd/sys/epoll.go b/vendor/github.com/containerd/containerd/sys/epoll.go deleted file mode 100644 index 73a57013ff..0000000000 --- a/vendor/github.com/containerd/containerd/sys/epoll.go +++ /dev/null @@ -1,34 +0,0 @@ -//go:build linux -// +build linux - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package sys - -import "golang.org/x/sys/unix" - -// EpollCreate1 is an alias for unix.EpollCreate1 -// Deprecated: use golang.org/x/sys/unix.EpollCreate1 -var EpollCreate1 = unix.EpollCreate1 - -// EpollCtl is an alias for unix.EpollCtl -// Deprecated: use golang.org/x/sys/unix.EpollCtl -var EpollCtl = unix.EpollCtl - -// EpollWait is an alias for unix.EpollWait -// Deprecated: use golang.org/x/sys/unix.EpollWait -var EpollWait = unix.EpollWait diff --git a/vendor/github.com/containerd/containerd/sys/fds.go b/vendor/github.com/containerd/containerd/sys/fds.go deleted file mode 100644 index a71a9cd7e9..0000000000 --- a/vendor/github.com/containerd/containerd/sys/fds.go +++ /dev/null @@ -1,35 +0,0 @@ -//go:build !windows && !darwin -// +build !windows,!darwin - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ - -package sys - -import ( - "os" - "path/filepath" - "strconv" -) - -// GetOpenFds returns the number of open fds for the process provided by pid -func GetOpenFds(pid int) (int, error) { - dirs, err := os.ReadDir(filepath.Join("/proc", strconv.Itoa(pid), "fd")) - if err != nil { - return -1, err - } - return len(dirs), nil -} diff --git a/vendor/github.com/containerd/containerd/sys/filesys_unix.go b/vendor/github.com/containerd/containerd/sys/filesys_unix.go deleted file mode 100644 index 805a7a736f..0000000000 --- a/vendor/github.com/containerd/containerd/sys/filesys_unix.go +++ /dev/null @@ -1,32 +0,0 @@ -//go:build !windows -// +build !windows - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package sys - -import "os" - -// ForceRemoveAll on unix is just a wrapper for os.RemoveAll -func ForceRemoveAll(path string) error { - return os.RemoveAll(path) -} - -// MkdirAllWithACL is a wrapper for os.MkdirAll on Unix systems. -func MkdirAllWithACL(path string, perm os.FileMode) error { - return os.MkdirAll(path, perm) -} diff --git a/vendor/github.com/containerd/containerd/sys/filesys_windows.go b/vendor/github.com/containerd/containerd/sys/filesys_windows.go deleted file mode 100644 index 87ebacc200..0000000000 --- a/vendor/github.com/containerd/containerd/sys/filesys_windows.go +++ /dev/null @@ -1,345 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package sys - -import ( - "fmt" - "os" - "path/filepath" - "regexp" - "sort" - "strconv" - "strings" - "syscall" - "unsafe" - - "github.com/Microsoft/hcsshim" - "golang.org/x/sys/windows" -) - -const ( - // SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System - SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" -) - -// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory -// ACL'd for Builtin Administrators and Local System. -func MkdirAllWithACL(path string, perm os.FileMode) error { - return mkdirall(path, true) -} - -// MkdirAll implementation that is volume path aware for Windows. 
It can be used -// as a drop-in replacement for os.MkdirAll() -func MkdirAll(path string, _ os.FileMode) error { - return mkdirall(path, false) -} - -// mkdirall is a custom version of os.MkdirAll modified for use on Windows -// so that it is both volume path aware, and can create a directory with -// a DACL. -func mkdirall(path string, adminAndLocalSystem bool) error { - if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) { - return nil - } - - // The rest of this method is largely copied from os.MkdirAll and should be kept - // as-is to ensure compatibility. - - // Fast path: if we can tell whether path is a directory or file, stop with success or error. - dir, err := os.Stat(path) - if err == nil { - if dir.IsDir() { - return nil - } - return &os.PathError{ - Op: "mkdir", - Path: path, - Err: syscall.ENOTDIR, - } - } - - // Slow path: make sure parent exists and then call Mkdir for path. - i := len(path) - for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. - i-- - } - - j := i - for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. - j-- - } - - if j > 1 { - // Create parent - err = mkdirall(path[0:j-1], adminAndLocalSystem) - if err != nil { - return err - } - } - - // Parent now exists; invoke os.Mkdir or mkdirWithACL and use its result. - if adminAndLocalSystem { - err = mkdirWithACL(path) - } else { - err = os.Mkdir(path, 0) - } - - if err != nil { - // Handle arguments like "foo/." by - // double-checking that directory doesn't exist. - dir, err1 := os.Lstat(path) - if err1 == nil && dir.IsDir() { - return nil - } - return err - } - return nil -} - -// mkdirWithACL creates a new directory. If there is an error, it will be of -// type *PathError. . -// -// This is a modified and combined version of os.Mkdir and windows.Mkdir -// in golang to cater for creating a directory am ACL permitting full -// access, with inheritance, to any subfolder/file for Built-in Administrators -// and Local System. -func mkdirWithACL(name string) error { - sa := windows.SecurityAttributes{Length: 0} - sd, err := windows.SecurityDescriptorFromString(SddlAdministratorsLocalSystem) - if err != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: err} - } - sa.Length = uint32(unsafe.Sizeof(sa)) - sa.InheritHandle = 1 - sa.SecurityDescriptor = sd - - namep, err := windows.UTF16PtrFromString(name) - if err != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: err} - } - - e := windows.CreateDirectory(namep, &sa) - if e != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: e} - } - return nil -} - -// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows, -// golang filepath.IsAbs does not consider a path \windows\system32 as absolute -// as it doesn't start with a drive-letter/colon combination. However, in -// docker we need to verify things such as WORKDIR /windows/system32 in -// a Dockerfile (which gets translated to \windows\system32 when being processed -// by the daemon. This SHOULD be treated as absolute from a docker processing -// perspective. -func IsAbs(path string) bool { - if !filepath.IsAbs(path) { - if !strings.HasPrefix(path, string(os.PathSeparator)) { - return false - } - } - return true -} - -// The origin of the functions below here are the golang OS and windows packages, -// slightly modified to only cope with files, not directories due to the -// specific use case. 
-// -// The alteration is to allow a file on Windows to be opened with -// FILE_FLAG_SEQUENTIAL_SCAN (particular for docker load), to avoid eating -// the standby list, particularly when accessing large files such as layer.tar. - -// CreateSequential creates the named file with mode 0666 (before umask), truncating -// it if it already exists. If successful, methods on the returned -// File can be used for I/O; the associated file descriptor has mode -// O_RDWR. -// If there is an error, it will be of type *PathError. -func CreateSequential(name string) (*os.File, error) { - return OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0) -} - -// OpenSequential opens the named file for reading. If successful, methods on -// the returned file can be used for reading; the associated file -// descriptor has mode O_RDONLY. -// If there is an error, it will be of type *PathError. -func OpenSequential(name string) (*os.File, error) { - return OpenFileSequential(name, os.O_RDONLY, 0) -} - -// OpenFileSequential is the generalized open call; most users will use Open -// or Create instead. -// If there is an error, it will be of type *PathError. -func OpenFileSequential(name string, flag int, _ os.FileMode) (*os.File, error) { - if name == "" { - return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT} - } - r, errf := windowsOpenFileSequential(name, flag, 0) - if errf == nil { - return r, nil - } - return nil, &os.PathError{Op: "open", Path: name, Err: errf} -} - -func windowsOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) { - r, e := windowsOpenSequential(name, flag|windows.O_CLOEXEC, 0) - if e != nil { - return nil, e - } - return os.NewFile(uintptr(r), name), nil -} - -func makeInheritSa() *windows.SecurityAttributes { - var sa windows.SecurityAttributes - sa.Length = uint32(unsafe.Sizeof(sa)) - sa.InheritHandle = 1 - return &sa -} - -func windowsOpenSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) { - if len(path) == 0 { - return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND - } - pathp, err := windows.UTF16PtrFromString(path) - if err != nil { - return windows.InvalidHandle, err - } - var access uint32 - switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) { - case windows.O_RDONLY: - access = windows.GENERIC_READ - case windows.O_WRONLY: - access = windows.GENERIC_WRITE - case windows.O_RDWR: - access = windows.GENERIC_READ | windows.GENERIC_WRITE - } - if mode&windows.O_CREAT != 0 { - access |= windows.GENERIC_WRITE - } - if mode&windows.O_APPEND != 0 { - access &^= windows.GENERIC_WRITE - access |= windows.FILE_APPEND_DATA - } - sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE) - var sa *windows.SecurityAttributes - if mode&windows.O_CLOEXEC == 0 { - sa = makeInheritSa() - } - var createmode uint32 - switch { - case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL): - createmode = windows.CREATE_NEW - case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC): - createmode = windows.CREATE_ALWAYS - case mode&windows.O_CREAT == windows.O_CREAT: - createmode = windows.OPEN_ALWAYS - case mode&windows.O_TRUNC == windows.O_TRUNC: - createmode = windows.TRUNCATE_EXISTING - default: - createmode = windows.OPEN_EXISTING - } - // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang. 
- // https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx - const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN - h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0) - return h, e -} - -// ForceRemoveAll is the same as os.RemoveAll, but is aware of io.containerd.snapshotter.v1.windows -// and uses hcsshim to unmount and delete container layers contained therein, in the correct order, -// when passed a containerd root data directory (i.e. the `--root` directory for containerd). -func ForceRemoveAll(path string) error { - // snapshots/windows/windows.go init() - const snapshotPlugin = "io.containerd.snapshotter.v1" + "." + "windows" - // snapshots/windows/windows.go NewSnapshotter() - snapshotDir := filepath.Join(path, snapshotPlugin, "snapshots") - if stat, err := os.Stat(snapshotDir); err == nil && stat.IsDir() { - if err := cleanupWCOWLayers(snapshotDir); err != nil { - return fmt.Errorf("failed to cleanup WCOW layers in %s: %w", snapshotDir, err) - } - } - - return os.RemoveAll(path) -} - -func cleanupWCOWLayers(root string) error { - // See snapshots/windows/windows.go getSnapshotDir() - var layerNums []int - var rmLayerNums []int - if err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { - if path != root && info.IsDir() { - name := filepath.Base(path) - if strings.HasPrefix(name, "rm-") { - layerNum, err := strconv.Atoi(strings.TrimPrefix(name, "rm-")) - if err != nil { - return err - } - rmLayerNums = append(rmLayerNums, layerNum) - } else { - layerNum, err := strconv.Atoi(name) - if err != nil { - return err - } - layerNums = append(layerNums, layerNum) - } - return filepath.SkipDir - } - - return nil - }); err != nil { - return err - } - - sort.Sort(sort.Reverse(sort.IntSlice(rmLayerNums))) - for _, rmLayerNum := range rmLayerNums { - if err := cleanupWCOWLayer(filepath.Join(root, "rm-"+strconv.Itoa(rmLayerNum))); err != nil { - return err - } - } - - sort.Sort(sort.Reverse(sort.IntSlice(layerNums))) - for _, layerNum := range layerNums { - if err := cleanupWCOWLayer(filepath.Join(root, strconv.Itoa(layerNum))); err != nil { - return err - } - } - - return nil -} - -func cleanupWCOWLayer(layerPath string) error { - info := hcsshim.DriverInfo{ - HomeDir: filepath.Dir(layerPath), - } - - // ERROR_DEV_NOT_EXIST is returned if the layer is not currently prepared or activated. - // ERROR_FLT_INSTANCE_NOT_FOUND is returned if the layer is currently activated but not prepared. - if err := hcsshim.UnprepareLayer(info, filepath.Base(layerPath)); err != nil { - if hcserror, ok := err.(*hcsshim.HcsError); !ok || (hcserror.Err != windows.ERROR_DEV_NOT_EXIST && hcserror.Err != syscall.Errno(windows.ERROR_FLT_INSTANCE_NOT_FOUND)) { - return fmt.Errorf("failed to unprepare %s: %w", layerPath, err) - } - } - - if err := hcsshim.DeactivateLayer(info, filepath.Base(layerPath)); err != nil { - return fmt.Errorf("failed to deactivate %s: %w", layerPath, err) - } - - if err := hcsshim.DestroyLayer(info, filepath.Base(layerPath)); err != nil { - return fmt.Errorf("failed to destroy %s: %w", layerPath, err) - } - - return nil -} diff --git a/vendor/github.com/containerd/containerd/sys/oom_linux.go b/vendor/github.com/containerd/containerd/sys/oom_linux.go deleted file mode 100644 index bb2a3eafb4..0000000000 --- a/vendor/github.com/containerd/containerd/sys/oom_linux.go +++ /dev/null @@ -1,82 +0,0 @@ -/* - Copyright The containerd Authors. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package sys - -import ( - "fmt" - "os" - "strconv" - "strings" - - "github.com/containerd/containerd/pkg/userns" - "golang.org/x/sys/unix" -) - -const ( - // OOMScoreAdjMin is from OOM_SCORE_ADJ_MIN https://github.com/torvalds/linux/blob/v5.10/include/uapi/linux/oom.h#L9 - OOMScoreAdjMin = -1000 - // OOMScoreAdjMax is from OOM_SCORE_ADJ_MAX https://github.com/torvalds/linux/blob/v5.10/include/uapi/linux/oom.h#L10 - OOMScoreAdjMax = 1000 -) - -// AdjustOOMScore sets the oom score for the provided pid. If the provided score -// is out of range (-1000 - 1000), it is clipped to the min/max value. -func AdjustOOMScore(pid, score int) error { - if score > OOMScoreAdjMax { - score = OOMScoreAdjMax - } else if score < OOMScoreAdjMin { - score = OOMScoreAdjMin - } - return SetOOMScore(pid, score) -} - -// SetOOMScore sets the oom score for the provided pid -func SetOOMScore(pid, score int) error { - if score > OOMScoreAdjMax || score < OOMScoreAdjMin { - return fmt.Errorf("value out of range (%d): OOM score must be between %d and %d", score, OOMScoreAdjMin, OOMScoreAdjMax) - } - path := fmt.Sprintf("/proc/%d/oom_score_adj", pid) - f, err := os.OpenFile(path, os.O_WRONLY, 0) - if err != nil { - return err - } - defer f.Close() - if _, err = f.WriteString(strconv.Itoa(score)); err != nil { - if os.IsPermission(err) && (!runningPrivileged() || userns.RunningInUserNS()) { - return nil - } - return err - } - return nil -} - -// GetOOMScoreAdj gets the oom score for a process. It returns 0 (zero) if either -// no oom score is set, or a sore is set to 0. -func GetOOMScoreAdj(pid int) (int, error) { - path := fmt.Sprintf("/proc/%d/oom_score_adj", pid) - data, err := os.ReadFile(path) - if err != nil { - return 0, err - } - return strconv.Atoi(strings.TrimSpace(string(data))) -} - -// runningPrivileged returns true if the effective user ID of the -// calling process is 0 -func runningPrivileged() bool { - return unix.Geteuid() == 0 -} diff --git a/vendor/github.com/containerd/containerd/sys/oom_unsupported.go b/vendor/github.com/containerd/containerd/sys/oom_unsupported.go deleted file mode 100644 index fa0db5a10e..0000000000 --- a/vendor/github.com/containerd/containerd/sys/oom_unsupported.go +++ /dev/null @@ -1,49 +0,0 @@ -//go:build !linux -// +build !linux - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package sys - -const ( - // OOMScoreMaxKillable is not implemented on non Linux - OOMScoreMaxKillable = 0 - // OOMScoreAdjMax is not implemented on non Linux - OOMScoreAdjMax = 0 -) - -// AdjustOOMScore sets the oom score for the provided pid. If the provided score -// is out of range (-1000 - 1000), it is clipped to the min/max value. -// -// Not implemented on Windows -func AdjustOOMScore(pid, score int) error { - return nil -} - -// SetOOMScore sets the oom score for the process -// -// Not implemented on Windows -func SetOOMScore(pid, score int) error { - return nil -} - -// GetOOMScoreAdj gets the oom score for a process -// -// Not implemented on Windows -func GetOOMScoreAdj(pid int) (int, error) { - return 0, nil -} diff --git a/vendor/github.com/containerd/containerd/sys/socket_unix.go b/vendor/github.com/containerd/containerd/sys/socket_unix.go deleted file mode 100644 index 367e19cad8..0000000000 --- a/vendor/github.com/containerd/containerd/sys/socket_unix.go +++ /dev/null @@ -1,81 +0,0 @@ -//go:build !windows -// +build !windows - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package sys - -import ( - "fmt" - "net" - "os" - "path/filepath" - - "golang.org/x/sys/unix" -) - -// CreateUnixSocket creates a unix socket and returns the listener -func CreateUnixSocket(path string) (net.Listener, error) { - // BSDs have a 104 limit - if len(path) > 104 { - return nil, fmt.Errorf("%q: unix socket path too long (> 104)", path) - } - if err := os.MkdirAll(filepath.Dir(path), 0660); err != nil { - return nil, err - } - if err := unix.Unlink(path); err != nil && !os.IsNotExist(err) { - return nil, err - } - return net.Listen("unix", path) -} - -// GetLocalListener returns a listener out of a unix socket. -func GetLocalListener(path string, uid, gid int) (net.Listener, error) { - // Ensure parent directory is created - if err := mkdirAs(filepath.Dir(path), uid, gid); err != nil { - return nil, err - } - - l, err := CreateUnixSocket(path) - if err != nil { - return l, err - } - - if err := os.Chmod(path, 0660); err != nil { - l.Close() - return nil, err - } - - if err := os.Chown(path, uid, gid); err != nil { - l.Close() - return nil, err - } - - return l, nil -} - -func mkdirAs(path string, uid, gid int) error { - if _, err := os.Stat(path); !os.IsNotExist(err) { - return err - } - - if err := os.MkdirAll(path, 0770); err != nil { - return err - } - - return os.Chown(path, uid, gid) -} diff --git a/vendor/github.com/containerd/containerd/sys/socket_windows.go b/vendor/github.com/containerd/containerd/sys/socket_windows.go deleted file mode 100644 index 1ae12bc511..0000000000 --- a/vendor/github.com/containerd/containerd/sys/socket_windows.go +++ /dev/null @@ -1,30 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package sys - -import ( - "net" - - "github.com/Microsoft/go-winio" -) - -// GetLocalListener returns a Listernet out of a named pipe. -// `path` must be of the form of `\\.\pipe\` -// (see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365150) -func GetLocalListener(path string, uid, gid int) (net.Listener, error) { - return winio.ListenPipe(path, nil) -} diff --git a/vendor/github.com/containerd/containerd/sys/userns_deprecated.go b/vendor/github.com/containerd/containerd/sys/userns_deprecated.go deleted file mode 100644 index 53acf55477..0000000000 --- a/vendor/github.com/containerd/containerd/sys/userns_deprecated.go +++ /dev/null @@ -1,23 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package sys - -import "github.com/containerd/containerd/pkg/userns" - -// RunningInUserNS detects whether we are currently running in a user namespace. -// Deprecated: use github.com/containerd/containerd/pkg/userns.RunningInUserNS instead. -var RunningInUserNS = userns.RunningInUserNS diff --git a/vendor/github.com/containers/image/v5/copy/compression.go b/vendor/github.com/containers/image/v5/copy/compression.go index 5286975b62..8960894292 100644 --- a/vendor/github.com/containers/image/v5/copy/compression.go +++ b/vendor/github.com/containers/image/v5/copy/compression.go @@ -10,6 +10,7 @@ import ( compressiontypes "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/image/v5/types" "github.com/sirupsen/logrus" + "golang.org/x/exp/maps" ) // bpDetectCompressionStepData contains data that the copy pipeline needs about the “detect compression” step. @@ -199,7 +200,9 @@ func (ic *imageCopier) bpcDecompressCompressed(stream *sourceStream, detected bp } // bpcPreserveOriginal returns a *bpCompressionStepData for not changing the original blob. -func (ic *imageCopier) bpcPreserveOriginal(stream *sourceStream, detected bpDetectCompressionStepData, +// This does not change the sourceStream parameter; we include it for symmetry with other +// pipeline steps. 
+func (ic *imageCopier) bpcPreserveOriginal(_ *sourceStream, detected bpDetectCompressionStepData, layerCompressionChangeSupported bool) *bpCompressionStepData { logrus.Debugf("Using original blob without modification") // Remember if the original blob was compressed, and if so how, so that if @@ -232,9 +235,7 @@ func (d *bpCompressionStepData) updateCompressionEdits(operation *types.LayerCom if *annotations == nil { *annotations = map[string]string{} } - for k, v := range d.uploadedAnnotations { - (*annotations)[k] = v - } + maps.Copy(*annotations, d.uploadedAnnotations) } // recordValidatedBlobData updates b.blobInfoCache with data about the created uploadedInfo adnd the original srcInfo. diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go index 41d578a973..a2125d7ed8 100644 --- a/vendor/github.com/containers/image/v5/copy/copy.go +++ b/vendor/github.com/containers/image/v5/copy/copy.go @@ -17,8 +17,10 @@ import ( "github.com/containers/image/v5/internal/image" "github.com/containers/image/v5/internal/imagedestination" "github.com/containers/image/v5/internal/imagesource" + internalManifest "github.com/containers/image/v5/internal/manifest" "github.com/containers/image/v5/internal/pkg/platform" "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/set" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/blobinfocache" "github.com/containers/image/v5/pkg/compression" @@ -32,6 +34,7 @@ import ( imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" "github.com/vbauerster/mpb/v8" + "golang.org/x/exp/slices" "golang.org/x/sync/semaphore" "golang.org/x/term" ) @@ -143,7 +146,7 @@ type Options struct { // Preserve digests, and fail if we cannot. PreserveDigests bool - // manifest MIME type of image set by user. "" is default and means use the autodetection to the the manifest MIME type + // manifest MIME type of image set by user. "" is default and means use the autodetection to the manifest MIME type ForceManifestMIMEType string ImageListSelection ImageListSelection // set to either CopySystemImage (the default), CopyAllImages, or CopySpecificImages to control which instances we copy when the source reference is a list; ignored if the source reference is not a list Instances []digest.Digest // if ImageListSelection is CopySpecificImages, copy only these instances and the list itself @@ -315,7 +318,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, if err != nil { return nil, fmt.Errorf("reading manifest for %s: %w", transports.ImageName(srcRef), err) } - manifestList, err := manifest.ListFromBlob(mfest, manifestType) + manifestList, err := internalManifest.ListFromBlob(mfest, manifestType) if err != nil { return nil, fmt.Errorf("parsing primary manifest as list for %s: %w", transports.ImageName(srcRef), err) } @@ -370,12 +373,7 @@ func supportsMultipleImages(dest types.ImageDestination) bool { // Anything goes! 
return true } - for _, mtype := range mtypes { - if manifest.MIMETypeIsMultiImage(mtype) { - return true - } - } - return false + return slices.ContainsFunc(mtypes, manifest.MIMETypeIsMultiImage) } // compareImageDestinationManifestEqual compares the `src` and `dest` image manifests (reading the manifest from the @@ -420,11 +418,11 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur if err != nil { return nil, fmt.Errorf("reading manifest list: %w", err) } - originalList, err := manifest.ListFromBlob(manifestList, manifestType) + originalList, err := internalManifest.ListFromBlob(manifestList, manifestType) if err != nil { return nil, fmt.Errorf("parsing manifest list %q: %w", string(manifestList), err) } - updatedList := originalList.Clone() + updatedList := originalList.CloneInternal() sigs, err := c.sourceSignatures(ctx, unparsedToplevel, options, "Getting image list signatures", @@ -491,24 +489,16 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur updates := make([]manifest.ListUpdate, len(instanceDigests)) instancesCopied := 0 for i, instanceDigest := range instanceDigests { - if options.ImageListSelection == CopySpecificImages { - skip := true - for _, instance := range options.Instances { - if instance == instanceDigest { - skip = false - break - } - } - if skip { - update, err := updatedList.Instance(instanceDigest) - if err != nil { - return nil, err - } - logrus.Debugf("Skipping instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests)) - // Record the digest/size/type of the manifest that we didn't copy. - updates[i] = update - continue + if options.ImageListSelection == CopySpecificImages && + !slices.Contains(options.Instances, instanceDigest) { + update, err := updatedList.Instance(instanceDigest) + if err != nil { + return nil, err } + logrus.Debugf("Skipping instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests)) + // Record the digest/size/type of the manifest that we didn't copy. + updates[i] = update + continue } logrus.Debugf("Copying instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests)) c.Printf("Copying image %s (%d/%d)\n", instanceDigest, instancesCopied+1, imagesToCopy) @@ -536,7 +526,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur c.Printf("Writing manifest list to image destination\n") var errs []string for _, thisListType := range append([]string{selectedListType}, otherManifestMIMETypeCandidates...) { - attemptedList := updatedList + var attemptedList internalManifest.ListPublic = updatedList logrus.Debugf("Trying to use manifest list type %s…", thisListType) @@ -823,7 +813,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli // has a built-in list of functions/methods (whatever object they are for) // which have their format strings checked; for other names we would have // to pass a parameter to every (go tool vet) invocation. -func (c *copier) Printf(format string, a ...interface{}) { +func (c *copier) Printf(format string, a ...any) { fmt.Fprintf(c.reportWriter, format, a...) 
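// Illustrative aside, not part of the patch: a self-contained sketch of the
// golang.org/x/exp/slices.ContainsFunc pattern that replaces the removed
// hand-written "loop and return true on match" code nearby. The MIME type
// values and the isListType predicate are examples only (the real code passes
// manifest.MIMETypeIsMultiImage).
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

// isListType is a hypothetical predicate standing in for manifest.MIMETypeIsMultiImage.
func isListType(mimeType string) bool {
	return mimeType == "application/vnd.docker.distribution.manifest.list.v2+json" ||
		mimeType == "application/vnd.oci.image.index.v1+json"
}

func main() {
	mtypes := []string{
		"application/vnd.docker.distribution.manifest.v2+json",
		"application/vnd.oci.image.index.v1+json",
	}
	// Equivalent to ranging over mtypes and returning true on the first match.
	fmt.Println(slices.ContainsFunc(mtypes, isListType)) // true
}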
} @@ -948,20 +938,20 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error { data[index] = cld } - // Create layer Encryption map - encLayerBitmap := map[int]bool{} + // Decide which layers to encrypt + layersToEncrypt := set.New[int]() var encryptAll bool if ic.ociEncryptLayers != nil { encryptAll = len(*ic.ociEncryptLayers) == 0 totalLayers := len(srcInfos) for _, l := range *ic.ociEncryptLayers { // if layer is negative, it is reverse indexed. - encLayerBitmap[(totalLayers+l)%totalLayers] = true + layersToEncrypt.Add((totalLayers + l) % totalLayers) } if encryptAll { for i := 0; i < len(srcInfos); i++ { - encLayerBitmap[i] = true + layersToEncrypt.Add(i) } } } @@ -980,7 +970,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error { return fmt.Errorf("copying layer: %w", err) } copyGroup.Add(1) - go copyLayerHelper(i, srcLayer, encLayerBitmap[i], progressPool, ic.c.rawSource.Reference().DockerReference()) + go copyLayerHelper(i, srcLayer, layersToEncrypt.Contains(i), progressPool, ic.c.rawSource.Reference().DockerReference()) } // A call to copyGroup.Wait() is done at this point by the defer above. @@ -1013,15 +1003,9 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error { // layerDigestsDiffer returns true iff the digests in a and b differ (ignoring sizes and possible other fields) func layerDigestsDiffer(a, b []types.BlobInfo) bool { - if len(a) != len(b) { - return true - } - for i := range a { - if a[i].Digest != b[i].Digest { - return true - } - } - return false + return !slices.EqualFunc(a, b, func(a, b types.BlobInfo) bool { + return a.Digest == b.Digest + }) } // copyUpdatedConfigAndManifest updates the image per ic.manifestUpdates, if necessary, diff --git a/vendor/github.com/containers/image/v5/copy/encryption.go b/vendor/github.com/containers/image/v5/copy/encryption.go index 5eae8bfcda..54aca9e572 100644 --- a/vendor/github.com/containers/image/v5/copy/encryption.go +++ b/vendor/github.com/containers/image/v5/copy/encryption.go @@ -7,6 +7,8 @@ import ( "github.com/containers/image/v5/types" "github.com/containers/ocicrypt" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/exp/maps" + "golang.org/x/exp/slices" ) // isOciEncrypted returns a bool indicating if a mediatype is encrypted @@ -18,12 +20,9 @@ func isOciEncrypted(mediatype string) bool { // isEncrypted checks if an image is encrypted func isEncrypted(i types.Image) bool { layers := i.LayerInfos() - for _, l := range layers { - if isOciEncrypted(l.MediaType) { - return true - } - } - return false + return slices.ContainsFunc(layers, func(l types.BlobInfo) bool { + return isOciEncrypted(l.MediaType) + }) } // bpDecryptionStepData contains data that the copy pipeline needs about the decryption step. @@ -47,11 +46,9 @@ func (c *copier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo types. 
stream.reader = reader stream.info.Digest = decryptedDigest stream.info.Size = -1 - for k := range stream.info.Annotations { - if strings.HasPrefix(k, "org.opencontainers.image.enc") { - delete(stream.info.Annotations, k) - } - } + maps.DeleteFunc(stream.info.Annotations, func(k string, _ string) bool { + return strings.HasPrefix(k, "org.opencontainers.image.enc") + }) return &bpDecryptionStepData{ decrypting: true, }, nil @@ -122,8 +119,6 @@ func (d *bpEncryptionStepData) updateCryptoOperationAndAnnotations(operation *ty if *annotations == nil { *annotations = map[string]string{} } - for k, v := range encryptAnnotations { - (*annotations)[k] = v - } + maps.Copy(*annotations, encryptAnnotations) return nil } diff --git a/vendor/github.com/containers/image/v5/copy/manifest.go b/vendor/github.com/containers/image/v5/copy/manifest.go index df12e50c0f..a35ea42205 100644 --- a/vendor/github.com/containers/image/v5/copy/manifest.go +++ b/vendor/github.com/containers/image/v5/copy/manifest.go @@ -6,9 +6,11 @@ import ( "fmt" "strings" + "github.com/containers/image/v5/internal/set" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" ) // preferredManifestMIMETypes lists manifest MIME types in order of our preference, if we can't use the original manifest and need to convert. @@ -19,7 +21,7 @@ var preferredManifestMIMETypes = []string{manifest.DockerV2Schema2MediaType, man // orderedSet is a list of strings (MIME types or platform descriptors in our case), with each string appearing at most once. type orderedSet struct { list []string - included map[string]struct{} + included *set.Set[string] } // newOrderedSet creates a correctly initialized orderedSet. @@ -27,15 +29,15 @@ type orderedSet struct { func newOrderedSet() *orderedSet { return &orderedSet{ list: []string{}, - included: map[string]struct{}{}, + included: set.New[string](), } } // append adds s to the end of os, only if it is not included already. func (os *orderedSet) append(s string) { - if _, ok := os.included[s]; !ok { + if !os.included.Contains(s) { os.list = append(os.list, s) - os.included[s] = struct{}{} + os.included.Add(s) } } @@ -80,10 +82,10 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest otherMIMETypeCandidates: []string{}, }, nil } - supportedByDest := map[string]struct{}{} + supportedByDest := set.New[string]() for _, t := range destSupportedManifestMIMETypes { if !in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(t) { - supportedByDest[t] = struct{}{} + supportedByDest.Add(t) } } @@ -96,7 +98,7 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest prioritizedTypes := newOrderedSet() // First of all, prefer to keep the original manifest unmodified. - if _, ok := supportedByDest[srcType]; ok { + if supportedByDest.Contains(srcType) { prioritizedTypes.append(srcType) } if in.cannotModifyManifestReason != "" { @@ -113,7 +115,7 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest // Then use our list of preferred types. 
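// Illustrative aside, not part of the patch: a minimal generic set with the
// New/Add/Contains shape that orderedSet and layersToEncrypt above rely on.
// This is only a sketch; the real code imports
// github.com/containers/image/v5/internal/set rather than defining its own type.
package main

import "fmt"

type Set[E comparable] struct {
	m map[E]struct{}
}

func New[E comparable]() *Set[E] {
	return &Set[E]{m: map[E]struct{}{}}
}

func (s *Set[E]) Add(v E) {
	s.m[v] = struct{}{}
}

func (s *Set[E]) Contains(v E) bool {
	_, ok := s.m[v]
	return ok
}

func main() {
	supported := New[string]()
	supported.Add("application/vnd.oci.image.manifest.v1+json")
	fmt.Println(supported.Contains("application/vnd.oci.image.manifest.v1+json"))           // true
	fmt.Println(supported.Contains("application/vnd.docker.distribution.manifest.v2+json")) // false
}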
for _, t := range preferredManifestMIMETypes { - if _, ok := supportedByDest[t]; ok { + if supportedByDest.Contains(t) { prioritizedTypes.append(t) } } @@ -166,11 +168,8 @@ func (c *copier) determineListConversion(currentListMIMEType string, destSupport prioritizedTypes := newOrderedSet() // The first priority is the current type, if it's in the list, since that lets us avoid a // conversion that isn't strictly necessary. - for _, t := range destSupportedMIMETypes { - if t == currentListMIMEType { - prioritizedTypes.append(currentListMIMEType) - break - } + if slices.Contains(destSupportedMIMETypes, currentListMIMEType) { + prioritizedTypes.append(currentListMIMEType) } // Pick out the other list types that we support. for _, t := range destSupportedMIMETypes { diff --git a/vendor/github.com/containers/image/v5/copy/progress_bars.go b/vendor/github.com/containers/image/v5/copy/progress_bars.go index 43145fb786..25f2463630 100644 --- a/vendor/github.com/containers/image/v5/copy/progress_bars.go +++ b/vendor/github.com/containers/image/v5/copy/progress_bars.go @@ -120,7 +120,7 @@ func (bar *progressBar) mark100PercentComplete() { bar.SetCurrent(bar.originalSize) // This triggers the completion condition. } else { // -1 = unknown size - // 0 is somewhat of a a special case: Unlike c/image, where 0 is a definite known + // 0 is somewhat of a special case: Unlike c/image, where 0 is a definite known // size (possible at least in theory), in mpb, zero-sized progress bars are treated // as unknown size, in particular they are not configured to be marked as // complete on bar.Current() reaching bar.total (because that would happen already diff --git a/vendor/github.com/containers/image/v5/directory/directory_src.go b/vendor/github.com/containers/image/v5/directory/directory_src.go index 98efdedd73..5fc83bb6f9 100644 --- a/vendor/github.com/containers/image/v5/directory/directory_src.go +++ b/vendor/github.com/containers/image/v5/directory/directory_src.go @@ -8,9 +8,9 @@ import ( "github.com/containers/image/v5/internal/imagesource/impl" "github.com/containers/image/v5/internal/imagesource/stubs" + "github.com/containers/image/v5/internal/manifest" "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/internal/signature" - "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" ) diff --git a/vendor/github.com/containers/image/v5/directory/directory_transport.go b/vendor/github.com/containers/image/v5/directory/directory_transport.go index 253ecb2475..7e3068693d 100644 --- a/vendor/github.com/containers/image/v5/directory/directory_transport.go +++ b/vendor/github.com/containers/image/v5/directory/directory_transport.go @@ -89,7 +89,7 @@ func (ref dirReference) Transport() types.ImageTransport { // StringWithinTransport returns a string representation of the reference, which MUST be such that // reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. // NOTE: The returned string is not promised to be equal to the original input to ParseReference; -// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. +// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa. 
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. func (ref dirReference) StringWithinTransport() string { return ref.path diff --git a/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go b/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go index b4ff4d08a5..32ae1ae8a7 100644 --- a/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go +++ b/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go @@ -9,7 +9,7 @@ import ( // ResolvePathToFullyExplicit returns the input path converted to an absolute, no-symlinks, cleaned up path. // To do so, all elements of the input path must exist; as a special case, the final component may be // a non-existent name (but not a symlink pointing to a non-existent name) -// This is intended as a a helper for implementations of types.ImageReference.PolicyConfigurationIdentity etc. +// This is intended as a helper for implementations of types.ImageReference.PolicyConfigurationIdentity etc. func ResolvePathToFullyExplicit(path string) (string, error) { switch _, err := os.Lstat(path); { case err == nil: diff --git a/vendor/github.com/containers/image/v5/docker/archive/transport.go b/vendor/github.com/containers/image/v5/docker/archive/transport.go index 304a8c618f..468a37c90a 100644 --- a/vendor/github.com/containers/image/v5/docker/archive/transport.go +++ b/vendor/github.com/containers/image/v5/docker/archive/transport.go @@ -62,24 +62,23 @@ func ParseReference(refString string) (types.ImageReference, error) { return nil, fmt.Errorf("docker-archive reference %s isn't of the form [:]", refString) } - parts := strings.SplitN(refString, ":", 2) - path := parts[0] + path, tagOrIndex, gotTagOrIndex := strings.Cut(refString, ":") var nt reference.NamedTagged sourceIndex := -1 - if len(parts) == 2 { + if gotTagOrIndex { // A :tag or :@index was specified. - if len(parts[1]) > 0 && parts[1][0] == '@' { - i, err := strconv.Atoi(parts[1][1:]) + if len(tagOrIndex) > 0 && tagOrIndex[0] == '@' { + i, err := strconv.Atoi(tagOrIndex[1:]) if err != nil { - return nil, fmt.Errorf("Invalid source index %s: %w", parts[1], err) + return nil, fmt.Errorf("Invalid source index %s: %w", tagOrIndex, err) } if i < 0 { return nil, fmt.Errorf("Invalid source index @%d: must not be negative", i) } sourceIndex = i } else { - ref, err := reference.ParseNormalizedNamed(parts[1]) + ref, err := reference.ParseNormalizedNamed(tagOrIndex) if err != nil { return nil, fmt.Errorf("docker-archive parsing reference: %w", err) } @@ -137,7 +136,7 @@ func (ref archiveReference) Transport() types.ImageTransport { // StringWithinTransport returns a string representation of the reference, which MUST be such that // reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. // NOTE: The returned string is not promised to be equal to the original input to ParseReference; -// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. +// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa. // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. 
func (ref archiveReference) StringWithinTransport() string { switch { diff --git a/vendor/github.com/containers/image/v5/docker/body_reader.go b/vendor/github.com/containers/image/v5/docker/body_reader.go new file mode 100644 index 0000000000..f958e80e8e --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/body_reader.go @@ -0,0 +1,234 @@ +package docker + +import ( + "context" + "errors" + "fmt" + "io" + "math" + "math/rand" + "net/http" + "net/url" + "strconv" + "strings" + "syscall" + "time" + + "github.com/sirupsen/logrus" +) + +// bodyReaderMinimumProgress is the minimum progress we want to see before we retry +const bodyReaderMinimumProgress = 1 * 1024 * 1024 + +// bodyReader is an io.ReadCloser returned by dockerImageSource.GetBlob, +// which can transparently resume some (very limited) kinds of aborted connections. +type bodyReader struct { + ctx context.Context + c *dockerClient + + path string // path to pass to makeRequest to retry + logURL *url.URL // a string to use in error messages + body io.ReadCloser // The currently open connection we use to read data, or nil if there is nothing to read from / close. + lastRetryOffset int64 + offset int64 // Current offset within the blob + firstConnectionTime time.Time + lastSuccessTime time.Time // time.Time{} if N/A +} + +// newBodyReader creates a bodyReader for request path in c. +// firstBody is an already correctly opened body for the blob, returing the full blob from the start. +// If reading from firstBody fails, bodyReader may heuristically decide to resume. +func newBodyReader(ctx context.Context, c *dockerClient, path string, firstBody io.ReadCloser) (io.ReadCloser, error) { + logURL, err := c.resolveRequestURL(path) + if err != nil { + return nil, err + } + res := &bodyReader{ + ctx: ctx, + c: c, + + path: path, + logURL: logURL, + body: firstBody, + lastRetryOffset: 0, + offset: 0, + firstConnectionTime: time.Now(), + } + return res, nil +} + +// parseDecimalInString ensures that s[start:] starts with a non-negative decimal number, and returns that number and the offset after the number. +func parseDecimalInString(s string, start int) (int64, int, error) { + i := start + for i < len(s) && s[i] >= '0' && s[i] <= '9' { + i++ + } + if i == start { + return -1, -1, errors.New("missing decimal number") + } + v, err := strconv.ParseInt(s[start:i], 10, 64) + if err != nil { + return -1, -1, fmt.Errorf("parsing number: %w", err) + } + return v, i, nil +} + +// parseExpectedChar ensures that s[pos] is the expected byte, and returns the offset after it. +func parseExpectedChar(s string, pos int, expected byte) (int, error) { + if pos == len(s) || s[pos] != expected { + return -1, fmt.Errorf("missing expected %q", expected) + } + return pos + 1, nil +} + +// parseContentRange ensures that res contains a Content-Range header with a byte range, and returns (first, last, completeLength) on success. Size can be -1. 
+func parseContentRange(res *http.Response) (int64, int64, int64, error) { + hdrs := res.Header.Values("Content-Range") + switch len(hdrs) { + case 0: + return -1, -1, -1, errors.New("missing Content-Range: header") + case 1: + break + default: + return -1, -1, -1, fmt.Errorf("ambiguous Content-Range:, %d header values", len(hdrs)) + } + hdr := hdrs[0] + expectedPrefix := "bytes " + if !strings.HasPrefix(hdr, expectedPrefix) { + return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, missing prefix %q", hdr, expectedPrefix) + } + first, pos, err := parseDecimalInString(hdr, len(expectedPrefix)) + if err != nil { + return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, parsing first-pos: %w", hdr, err) + } + pos, err = parseExpectedChar(hdr, pos, '-') + if err != nil { + return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q: %w", hdr, err) + } + last, pos, err := parseDecimalInString(hdr, pos) + if err != nil { + return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, parsing last-pos: %w", hdr, err) + } + pos, err = parseExpectedChar(hdr, pos, '/') + if err != nil { + return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q: %w", hdr, err) + } + completeLength := int64(-1) + if pos < len(hdr) && hdr[pos] == '*' { + pos++ + } else { + completeLength, pos, err = parseDecimalInString(hdr, pos) + if err != nil { + return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, parsing complete-length: %w", hdr, err) + } + } + if pos < len(hdr) { + return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, unexpected trailing content", hdr) + } + return first, last, completeLength, nil +} + +// Read implements io.ReadCloser +func (br *bodyReader) Read(p []byte) (int, error) { + if br.body == nil { + return 0, fmt.Errorf("internal error: bodyReader.Read called on a closed object for %s", br.logURL.Redacted()) + } + n, err := br.body.Read(p) + br.offset += int64(n) + switch { + case err == nil || err == io.EOF: + br.lastSuccessTime = time.Now() + return n, err // Unlike the default: case, don’t log anything. + + case errors.Is(err, io.ErrUnexpectedEOF) || errors.Is(err, syscall.ECONNRESET): + originalErr := err + redactedURL := br.logURL.Redacted() + if err := br.errorIfNotReconnecting(originalErr, redactedURL); err != nil { + return n, err + } + + if err := br.body.Close(); err != nil { + logrus.Debugf("Error closing blob body: %v", err) // … and ignore err otherwise + } + br.body = nil + time.Sleep(1*time.Second + time.Duration(rand.Intn(100_000))*time.Microsecond) // Some jitter so that a failure blip doesn’t cause a deterministic stampede + + headers := map[string][]string{ + "Range": {fmt.Sprintf("bytes=%d-", br.offset)}, + } + res, err := br.c.makeRequest(br.ctx, http.MethodGet, br.path, headers, nil, v2Auth, nil) + if err != nil { + return n, fmt.Errorf("%w (while reconnecting: %v)", originalErr, err) + } + consumedBody := false + defer func() { + if !consumedBody { + res.Body.Close() + } + }() + switch res.StatusCode { + case http.StatusPartialContent: // OK + // A client MUST inspect a 206 response's Content-Type and Content-Range field(s) to determine what parts are enclosed and whether additional requests are needed. + // The recipient of an invalid Content-Range MUST NOT attempt to recombine the received content with a stored representation. 
+ first, last, completeLength, err := parseContentRange(res) + if err != nil { + return n, fmt.Errorf("%w (after reconnecting, invalid Content-Range header: %v)", originalErr, err) + } + // We don’t handle responses that start at an unrequested offset, nor responses that terminate before the end of the full blob. + if first != br.offset || (completeLength != -1 && last+1 != completeLength) { + return n, fmt.Errorf("%w (after reconnecting at offset %d, got unexpected Content-Range %d-%d/%d)", originalErr, br.offset, first, last, completeLength) + } + // Continue below + case http.StatusOK: + return n, fmt.Errorf("%w (after reconnecting, server did not process a Range: header, status %d)", originalErr, http.StatusOK) + default: + err := registryHTTPResponseToError(res) + return n, fmt.Errorf("%w (after reconnecting, fetching blob: %v)", originalErr, err) + } + + logrus.Debugf("Succesfully reconnected to %s", redactedURL) + consumedBody = true + br.body = res.Body + br.lastRetryOffset = br.offset + return n, nil + + default: + logrus.Debugf("Error reading blob body from %s: %#v", br.logURL.Redacted(), err) + return n, err + } +} + +// millisecondsSince is like time.Since(tm).Milliseconds, but it returns a floating-point value +func millisecondsSince(tm time.Time) float64 { + return float64(time.Since(tm).Nanoseconds()) / 1_000_000.0 +} + +// errorIfNotReconnecting makes a heuristic decision whether we should reconnect after err at redactedURL; if so, it returns nil, +// otherwise it returns an appropriate error to return to the caller (possibly augmented with data about the heuristic) +func (br *bodyReader) errorIfNotReconnecting(originalErr error, redactedURL string) error { + totalTime := millisecondsSince(br.firstConnectionTime) + failureTime := math.NaN() + if (br.lastSuccessTime != time.Time{}) { + failureTime = millisecondsSince(br.lastSuccessTime) + } + logrus.Debugf("Reading blob body from %s failed (%#v), decision inputs: lastRetryOffset %d, offset %d, %.3f ms since first connection, %.3f ms since last progress", + redactedURL, originalErr, br.lastRetryOffset, br.offset, totalTime, failureTime) + progress := br.offset - br.lastRetryOffset + if progress < bodyReaderMinimumProgress { + logrus.Debugf("Not reconnecting to %s because only %d bytes progress made", redactedURL, progress) + return fmt.Errorf("(heuristic tuning data: last retry %d, current offset %d; %.3f ms total, %.3f ms since progress): %w", + br.lastRetryOffset, br.offset, totalTime, failureTime, originalErr) + } + logrus.Infof("Reading blob body from %s failed (%v), reconnecting…", redactedURL, originalErr) + return nil +} + +// Close implements io.ReadCloser +func (br *bodyReader) Close() error { + if br.body == nil { + return nil + } + err := br.body.Close() + br.body = nil + return err +} diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go index ad21890843..9eedff7456 100644 --- a/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go +++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go @@ -118,7 +118,7 @@ func (ref daemonReference) Transport() types.ImageTransport { // StringWithinTransport returns a string representation of the reference, which MUST be such that // reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. 
// NOTE: The returned string is not promised to be equal to the original input to ParseReference; -// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. +// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa. // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix; // instead, see transports.ImageName(). func (ref daemonReference) StringWithinTransport() string { diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go index f8d17eaaad..6601363d3b 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_client.go +++ b/vendor/github.com/containers/image/v5/docker/docker_client.go @@ -448,8 +448,8 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima if link == "" { break } - linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>") - linkURL, err := url.Parse(linkURLStr) + linkURLPart, _, _ := strings.Cut(link, ";") + linkURL, err := url.Parse(strings.Trim(linkURLPart, "<>")) if err != nil { return searchRes, err } @@ -472,14 +472,24 @@ func (c *dockerClient) makeRequest(ctx context.Context, method, path string, hea return nil, err } - urlString := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path) - requestURL, err := url.Parse(urlString) + requestURL, err := c.resolveRequestURL(path) if err != nil { return nil, err } return c.makeRequestToResolvedURL(ctx, method, requestURL, headers, stream, -1, auth, extraScope) } +// resolveRequestURL turns a path for c.makeRequest into a full URL. +// Most users should call makeRequest directly, this exists basically to make the URL available for debug logs. +func (c *dockerClient) resolveRequestURL(path string) (*url.URL, error) { + urlString := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path) + res, err := url.Parse(urlString) + if err != nil { + return nil, err + } + return res, nil +} + // Checks if the auth headers in the response contain an indication of a failed // authorizdation because of an "insufficient_scope" error. If that's the case, // returns the required scope to be used for fetching a new token. @@ -586,7 +596,7 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method stri case <-time.After(delay): // Nothing } - delay = delay * 2 // If the registry does not specify a delay, back off exponentially. + delay *= 2 // If the registry does not specify a delay, back off exponentially. } } @@ -965,7 +975,14 @@ func (c *dockerClient) getBlob(ctx context.Context, ref dockerReference, info ty return nil, 0, fmt.Errorf("fetching blob: %w", err) } cache.RecordKnownLocation(ref.Transport(), bicTransportScope(ref), info.Digest, newBICLocationReference(ref)) - return res.Body, getBlobSize(res), nil + blobSize := getBlobSize(res) + + reconnectingReader, err := newBodyReader(ctx, c, path, res.Body) + if err != nil { + res.Body.Close() + return nil, 0, err + } + return reconnectingReader, blobSize, nil } // getOCIDescriptorContents returns the contents a blob spcified by descriptor in ref, which must fit within limit. 
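// Illustrative aside, not part of the patch: a self-contained sketch of the
// strings.Cut pattern the client code above now uses to take the URL portion of
// a pagination Link header before trimming the angle brackets. The header value
// is an example only.
package main

import (
	"fmt"
	"strings"
)

func main() {
	link := `</v2/library/busybox/tags/list?n=50&last=latest>; rel="next"`
	// Everything before the first ";" is the "<URL>" part; Trim strips the brackets.
	linkURLPart, _, _ := strings.Cut(link, ";")
	fmt.Println(strings.Trim(linkURLPart, "<>"))
	// Output: /v2/library/busybox/tags/list?n=50&last=latest
}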
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image.go b/vendor/github.com/containers/image/v5/docker/docker_image.go index 6a4331e335..6e121533ed 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image.go @@ -94,8 +94,8 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types. break } - linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>") - linkURL, err := url.Parse(linkURLStr) + linkURLPart, _, _ := strings.Cut(link, ";") + linkURL, err := url.Parse(strings.Trim(linkURLPart, "<>")) if err != nil { return tags, err } diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go index 7a7f72d9a2..3ca59682fb 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go @@ -21,6 +21,7 @@ import ( "github.com/containers/image/v5/internal/iolimits" "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/internal/putblobdigest" + "github.com/containers/image/v5/internal/set" "github.com/containers/image/v5/internal/signature" "github.com/containers/image/v5/internal/streamdigest" "github.com/containers/image/v5/internal/uploadreader" @@ -32,6 +33,8 @@ import ( "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" + "golang.org/x/exp/maps" + "golang.org/x/exp/slices" ) type dockerImageDestination struct { @@ -731,19 +734,10 @@ func layerMatchesSigstoreSignature(layer imgspecv1.Descriptor, mimeType string, // But right now we don’t want to deal with corner cases like bad digest formats // or unavailable algorithms; in the worst case we end up with duplicate signature // entries. - layer.Digest.String() != digest.FromBytes(payloadBlob).String() { + layer.Digest.String() != digest.FromBytes(payloadBlob).String() || + !maps.Equal(layer.Annotations, annotations) { return false } - if len(layer.Annotations) != len(annotations) { - return false - } - for k, v1 := range layer.Annotations { - if v2, ok := annotations[k]; !ok || v1 != v2 { - return false - } - } - // All annotations in layer exist in sig, and the number of annotations is the same, so all annotations - // in sig also exist in layer. 
return true } @@ -803,12 +797,11 @@ func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context if err != nil { return err } - existingSigNames := map[string]struct{}{} + existingSigNames := set.New[string]() for _, sig := range existingSignatures.Signatures { - existingSigNames[sig.Name] = struct{}{} + existingSigNames.Add(sig.Name) } -sigExists: for _, newSigWithFormat := range signatures { newSigSimple, ok := newSigWithFormat.(signature.SimpleSigning) if !ok { @@ -816,10 +809,10 @@ sigExists: } newSig := newSigSimple.UntrustedSignature() - for _, existingSig := range existingSignatures.Signatures { - if existingSig.Version == extensionSignatureSchemaVersion && existingSig.Type == extensionSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) { - continue sigExists - } + if slices.ContainsFunc(existingSignatures.Signatures, func(existingSig extensionSignature) bool { + return existingSig.Version == extensionSignatureSchemaVersion && existingSig.Type == extensionSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) + }) { + continue } // The API expect us to invent a new unique name. This is racy, but hopefully good enough. @@ -831,7 +824,7 @@ sigExists: return fmt.Errorf("generating random signature len %d: %w", n, err) } signatureName = fmt.Sprintf("%s@%032x", manifestDigest.String(), randBytes) - if _, ok := existingSigNames[signatureName]; !ok { + if !existingSigNames.Contains(signatureName) { break } } diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go index 373fb259c1..a115268de3 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go @@ -250,7 +250,7 @@ func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error, currentOffset += toSkip } s := signalCloseReader{ - closed: make(chan interface{}), + closed: make(chan struct{}), stream: io.NopCloser(io.LimitReader(body, int64(c.Length))), consumeStream: true, } @@ -292,7 +292,7 @@ func handle206Response(streams chan io.ReadCloser, errs chan error, body io.Read return } s := signalCloseReader{ - closed: make(chan interface{}), + closed: make(chan struct{}), stream: p, } streams <- s @@ -335,7 +335,7 @@ func parseMediaType(contentType string) (string, map[string]string, error) { func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { headers := make(map[string][]string) - var rangeVals []string + rangeVals := make([]string, 0, len(chunks)) for _, c := range chunks { rangeVals = append(rangeVals, fmt.Sprintf("%d-%d", c.Offset, c.Offset+c.Length-1)) } @@ -766,7 +766,7 @@ func makeBufferedNetworkReader(stream io.ReadCloser, nBuffers, bufferSize uint) } type signalCloseReader struct { - closed chan interface{} + closed chan struct{} stream io.ReadCloser consumeStream bool } diff --git a/vendor/github.com/containers/image/v5/docker/docker_transport.go b/vendor/github.com/containers/image/v5/docker/docker_transport.go index 0544bb3c93..6ae8491594 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_transport.go +++ b/vendor/github.com/containers/image/v5/docker/docker_transport.go @@ -92,7 +92,7 @@ func (ref dockerReference) Transport() types.ImageTransport { // StringWithinTransport returns a string representation of the 
reference, which MUST be such that // reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. // NOTE: The returned string is not promised to be equal to the original input to ParseReference; -// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. +// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa. // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. func (ref dockerReference) StringWithinTransport() string { return "//" + reference.FamiliarString(ref.ref) diff --git a/vendor/github.com/containers/image/v5/docker/errors.go b/vendor/github.com/containers/image/v5/docker/errors.go index 74fe17648c..2caa10d7d3 100644 --- a/vendor/github.com/containers/image/v5/docker/errors.go +++ b/vendor/github.com/containers/image/v5/docker/errors.go @@ -40,7 +40,7 @@ func httpResponseToError(res *http.Response, context string) error { return ErrUnauthorizedForCredentials{Err: err} default: if context != "" { - context = context + ": " + context += ": " } return fmt.Errorf("%sinvalid status code from registry %d (%s)", context, res.StatusCode, http.StatusText(res.StatusCode)) } diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go index 44e1004a62..b5721fef88 100644 --- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go @@ -13,10 +13,12 @@ import ( "time" "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/set" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" ) // Writer allows creating a (docker save)-formatted tar archive containing one or more images. @@ -29,7 +31,7 @@ type Writer struct { // Other state. blobs map[digest.Digest]types.BlobInfo // list of already-sent blobs repositories map[string]map[string]string - legacyLayers map[string]struct{} // A set of IDs of legacy layers that have been already sent. + legacyLayers *set.Set[string] // A set of IDs of legacy layers that have been already sent. manifest []ManifestItem manifestByConfig map[digest.Digest]int // A map from config digest to an entry index in manifest above. } @@ -42,7 +44,7 @@ func NewWriter(dest io.Writer) *Writer { tar: tar.NewWriter(dest), blobs: make(map[digest.Digest]types.BlobInfo), repositories: map[string]map[string]string{}, - legacyLayers: map[string]struct{}{}, + legacyLayers: set.New[string](), manifestByConfig: map[digest.Digest]int{}, } } @@ -89,7 +91,7 @@ func (w *Writer) recordBlobLocked(info types.BlobInfo) { // ensureSingleLegacyLayerLocked writes legacy VERSION and configuration files for a single layer // The caller must have locked the Writer. func (w *Writer) ensureSingleLegacyLayerLocked(layerID string, layerDigest digest.Digest, configBytes []byte) error { - if _, ok := w.legacyLayers[layerID]; !ok { + if !w.legacyLayers.Contains(layerID) { // Create a symlink for the legacy format, where there is one subdirectory per layer ("image"). // See also the comment in physicalLayerPath. 
physicalLayerPath := w.physicalLayerPath(layerDigest) @@ -106,7 +108,7 @@ func (w *Writer) ensureSingleLegacyLayerLocked(layerID string, layerDigest diges return fmt.Errorf("writing config json file: %w", err) } - w.legacyLayers[layerID] = struct{}{} + w.legacyLayers.Add(layerID) } return nil } @@ -117,7 +119,7 @@ func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2De lastLayerID := "" for i, l := range layerDescriptors { // The legacy format requires a config file per layer - layerConfig := make(map[string]interface{}) + layerConfig := make(map[string]any) // The root layer doesn't have any parent if lastLayerID != "" { @@ -188,14 +190,9 @@ func checkManifestItemsMatch(a, b *ManifestItem) error { if a.Config != b.Config { return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with configs %#v vs. %#v", a.Config, b.Config) } - if len(a.Layers) != len(b.Layers) { + if !slices.Equal(a.Layers, b.Layers) { return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with layers %#v vs. %#v", a.Layers, b.Layers) } - for i := range a.Layers { - if a.Layers[i] != b.Layers[i] { - return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with layers[i] %#v vs. %#v", a.Layers[i], b.Layers[i]) - } - } // Ignore RepoTags, that will be built later. // Ignore Parent and LayerSources, which we don’t set to anything meaningful. return nil @@ -229,9 +226,9 @@ func (w *Writer) ensureManifestItemLocked(layerDescriptors []manifest.Schema2Des item = &w.manifest[i] } - knownRepoTags := map[string]struct{}{} + knownRepoTags := set.New[string]() for _, repoTag := range item.RepoTags { - knownRepoTags[repoTag] = struct{}{} + knownRepoTags.Add(repoTag) } for _, tag := range repoTags { // For github.com/docker/docker consumers, this works just as well as @@ -252,9 +249,9 @@ func (w *Writer) ensureManifestItemLocked(layerDescriptors []manifest.Schema2Des // analysis and explanation. refString := fmt.Sprintf("%s:%s", tag.Name(), tag.Tag()) - if _, ok := knownRepoTags[refString]; !ok { + if !knownRepoTags.Contains(refString) { item.RepoTags = append(item.RepoTags, refString) - knownRepoTags[refString] = struct{}{} + knownRepoTags.Add(refString) } } @@ -337,7 +334,7 @@ func (t *tarFI) ModTime() time.Time { func (t *tarFI) IsDir() bool { return false } -func (t *tarFI) Sys() interface{} { +func (t *tarFI) Sys() any { return nil } diff --git a/vendor/github.com/containers/image/v5/docker/reference/normalize.go b/vendor/github.com/containers/image/v5/docker/reference/normalize.go index 6a86ec64fd..d3f47d210f 100644 --- a/vendor/github.com/containers/image/v5/docker/reference/normalize.go +++ b/vendor/github.com/containers/image/v5/docker/reference/normalize.go @@ -104,7 +104,7 @@ func splitDockerDomain(name string) (domain, remainder string) { } // familiarizeName returns a shortened version of the name familiar -// to to the Docker UI. Familiar names have the default domain +// to the Docker UI. Familiar names have the default domain // "docker.io" and "library/" repository prefix removed. // For example, "docker.io/library/redis" will have the familiar // name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp". 
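// Illustrative aside, not part of the patch: golang.org/x/exp/slices.Equal,
// which the tarfile writer change above uses instead of a manual length check
// plus an element-by-element loop. The digest strings are examples only.
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	a := []string{"sha256:1111", "sha256:2222"}
	b := []string{"sha256:1111", "sha256:2222"}
	c := []string{"sha256:1111"}
	// slices.Equal reports whether both slices have the same length and equal elements, in order.
	fmt.Println(slices.Equal(a, b)) // true
	fmt.Println(slices.Equal(a, c)) // false
}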
diff --git a/vendor/github.com/containers/image/v5/docker/reference/reference.go b/vendor/github.com/containers/image/v5/docker/reference/reference.go index b7cd00b0d6..9e6497539a 100644 --- a/vendor/github.com/containers/image/v5/docker/reference/reference.go +++ b/vendor/github.com/containers/image/v5/docker/reference/reference.go @@ -175,7 +175,7 @@ func splitDomain(name string) (string, string) { // hostname and name string. If no valid hostname is // found, the hostname is empty and the full value // is returned as name -// DEPRECATED: Use Domain or Path +// Deprecated: Use Domain or Path func SplitHostname(named Named) (string, string) { if r, ok := named.(namedRepository); ok { return r.Domain(), r.Path() diff --git a/vendor/github.com/containers/image/v5/docker/reference/regexp.go b/vendor/github.com/containers/image/v5/docker/reference/regexp.go index bb8c554743..76ba5c2d5c 100644 --- a/vendor/github.com/containers/image/v5/docker/reference/regexp.go +++ b/vendor/github.com/containers/image/v5/docker/reference/regexp.go @@ -1,9 +1,10 @@ package reference import ( - storageRegexp "github.com/containers/storage/pkg/regexp" "regexp" "strings" + + storageRegexp "github.com/containers/storage/pkg/regexp" ) const ( diff --git a/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go b/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go index 37ca098a81..6bcb835b9e 100644 --- a/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go +++ b/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go @@ -149,7 +149,7 @@ func expectTokenOrQuoted(s string) (value string, rest string) { p := make([]byte, len(s)-1) j := copy(p, s[:i]) escape := true - for i = i + 1; i < len(s); i++ { + for i++; i < len(s); i++ { b := s[i] switch { case escape: diff --git a/vendor/github.com/containers/image/v5/internal/image/docker_list.go b/vendor/github.com/containers/image/v5/internal/image/docker_list.go index 8afc406282..617a451aa9 100644 --- a/vendor/github.com/containers/image/v5/internal/image/docker_list.go +++ b/vendor/github.com/containers/image/v5/internal/image/docker_list.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/internal/manifest" "github.com/containers/image/v5/types" ) diff --git a/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go b/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go index 23a21999aa..15c9c22793 100644 --- a/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go +++ b/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go @@ -381,7 +381,7 @@ func v1ConfigFromConfigJSON(configJSON []byte, v1ID, parentV1ID string, throwawa delete(rawContents, "rootfs") delete(rawContents, "history") - updates := map[string]interface{}{"id": v1ID} + updates := map[string]any{"id": v1ID} if parentV1ID != "" { updates["parent"] = parentV1ID } diff --git a/vendor/github.com/containers/image/v5/internal/image/oci_index.go b/vendor/github.com/containers/image/v5/internal/image/oci_index.go index f4f76622c5..0e945c8519 100644 --- a/vendor/github.com/containers/image/v5/internal/image/oci_index.go +++ b/vendor/github.com/containers/image/v5/internal/image/oci_index.go @@ -4,7 +4,7 @@ import ( 
"context" "fmt" - "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/internal/manifest" "github.com/containers/image/v5/types" ) diff --git a/vendor/github.com/containers/image/v5/internal/manifest/common.go b/vendor/github.com/containers/image/v5/internal/manifest/common.go new file mode 100644 index 0000000000..1f2ccb5286 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/manifest/common.go @@ -0,0 +1,72 @@ +package manifest + +import ( + "encoding/json" + "fmt" +) + +// AllowedManifestFields is a bit mask of “essential” manifest fields that ValidateUnambiguousManifestFormat +// can expect to be present. +type AllowedManifestFields int + +const ( + AllowedFieldConfig AllowedManifestFields = 1 << iota + AllowedFieldFSLayers + AllowedFieldHistory + AllowedFieldLayers + AllowedFieldManifests + AllowedFieldFirstUnusedBit // Keep this at the end! +) + +// ValidateUnambiguousManifestFormat rejects manifests (incl. multi-arch) that look like more than +// one kind we currently recognize, i.e. if they contain any of the known “essential” format fields +// other than the ones the caller specifically allows. +// expectedMIMEType is used only for diagnostics. +// NOTE: The caller should do the non-heuristic validations (e.g. check for any specified format +// identification/version, or other “magic numbers”) before calling this, to cleanly reject unambiguous +// data that just isn’t what was expected, as opposed to actually ambiguous data. +func ValidateUnambiguousManifestFormat(manifest []byte, expectedMIMEType string, + allowed AllowedManifestFields) error { + if allowed >= AllowedFieldFirstUnusedBit { + return fmt.Errorf("internal error: invalid allowedManifestFields value %#v", allowed) + } + // Use a private type to decode, not just a map[string]any, because we want + // to also reject case-insensitive matches (which would be used by Go when really decoding + // the manifest). + // (It is expected that as manifest formats are added or extended over time, more fields will be added + // here.) + detectedFields := struct { + Config any `json:"config"` + FSLayers any `json:"fsLayers"` + History any `json:"history"` + Layers any `json:"layers"` + Manifests any `json:"manifests"` + }{} + if err := json.Unmarshal(manifest, &detectedFields); err != nil { + // The caller was supposed to already validate version numbers, so this should not happen; + // let’s not bother with making this error “nice”. + return err + } + unexpected := []string{} + // Sadly this isn’t easy to automate in Go, without reflection. So, copy&paste. 
+ if detectedFields.Config != nil && (allowed&AllowedFieldConfig) == 0 { + unexpected = append(unexpected, "config") + } + if detectedFields.FSLayers != nil && (allowed&AllowedFieldFSLayers) == 0 { + unexpected = append(unexpected, "fsLayers") + } + if detectedFields.History != nil && (allowed&AllowedFieldHistory) == 0 { + unexpected = append(unexpected, "history") + } + if detectedFields.Layers != nil && (allowed&AllowedFieldLayers) == 0 { + unexpected = append(unexpected, "layers") + } + if detectedFields.Manifests != nil && (allowed&AllowedFieldManifests) == 0 { + unexpected = append(unexpected, "manifests") + } + if len(unexpected) != 0 { + return fmt.Errorf(`rejecting ambiguous manifest, unexpected fields %#v in supposedly %s`, + unexpected, expectedMIMEType) + } + return nil +} diff --git a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2.go b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2.go new file mode 100644 index 0000000000..68d0796978 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2.go @@ -0,0 +1,15 @@ +package manifest + +import ( + "github.com/opencontainers/go-digest" +) + +// Schema2Descriptor is a “descriptor” in docker/distribution schema 2. +// +// This is publicly visible as c/image/manifest.Schema2Descriptor. +type Schema2Descriptor struct { + MediaType string `json:"mediaType"` + Size int64 `json:"size"` + Digest digest.Digest `json:"digest"` + URLs []string `json:"urls,omitempty"` +} diff --git a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go new file mode 100644 index 0000000000..76261e436a --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go @@ -0,0 +1,255 @@ +package manifest + +import ( + "encoding/json" + "fmt" + + platform "github.com/containers/image/v5/internal/pkg/platform" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/exp/slices" +) + +// Schema2PlatformSpec describes the platform which a particular manifest is +// specialized for. +// This is publicly visible as c/image/manifest.Schema2PlatformSpec. +type Schema2PlatformSpec struct { + Architecture string `json:"architecture"` + OS string `json:"os"` + OSVersion string `json:"os.version,omitempty"` + OSFeatures []string `json:"os.features,omitempty"` + Variant string `json:"variant,omitempty"` + Features []string `json:"features,omitempty"` // removed in OCI +} + +// Schema2ManifestDescriptor references a platform-specific manifest. +// This is publicly visible as c/image/manifest.Schema2ManifestDescriptor. +type Schema2ManifestDescriptor struct { + Schema2Descriptor + Platform Schema2PlatformSpec `json:"platform"` +} + +// Schema2ListPublic is a list of platform-specific manifests. +// This is publicly visible as c/image/manifest.Schema2List. +// Internal users should usually use Schema2List instead. +type Schema2ListPublic struct { + SchemaVersion int `json:"schemaVersion"` + MediaType string `json:"mediaType"` + Manifests []Schema2ManifestDescriptor `json:"manifests"` +} + +// MIMEType returns the MIME type of this particular manifest list. 
+func (list *Schema2ListPublic) MIMEType() string { + return list.MediaType +} + +// Instances returns a slice of digests of the manifests that this list knows of. +func (list *Schema2ListPublic) Instances() []digest.Digest { + results := make([]digest.Digest, len(list.Manifests)) + for i, m := range list.Manifests { + results[i] = m.Digest + } + return results +} + +// Instance returns the ListUpdate of a particular instance in the list. +func (list *Schema2ListPublic) Instance(instanceDigest digest.Digest) (ListUpdate, error) { + for _, manifest := range list.Manifests { + if manifest.Digest == instanceDigest { + return ListUpdate{ + Digest: manifest.Digest, + Size: manifest.Size, + MediaType: manifest.MediaType, + }, nil + } + } + return ListUpdate{}, fmt.Errorf("unable to find instance %s passed to Schema2List.Instances", instanceDigest) +} + +// UpdateInstances updates the sizes, digests, and media types of the manifests +// which the list catalogs. +func (list *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error { + if len(updates) != len(list.Manifests) { + return fmt.Errorf("incorrect number of update entries passed to Schema2List.UpdateInstances: expected %d, got %d", len(list.Manifests), len(updates)) + } + for i := range updates { + if err := updates[i].Digest.Validate(); err != nil { + return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances contained an invalid digest: %w", i+1, len(updates), err) + } + list.Manifests[i].Digest = updates[i].Digest + if updates[i].Size < 0 { + return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size) + } + list.Manifests[i].Size = updates[i].Size + if updates[i].MediaType == "" { + return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(updates), list.Manifests[i].MediaType) + } + list.Manifests[i].MediaType = updates[i].MediaType + } + return nil +} + +// ChooseInstance parses blob as a schema2 manifest list, and returns the digest +// of the image which is appropriate for the current environment. +func (list *Schema2ListPublic) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) { + wantedPlatforms, err := platform.WantedPlatforms(ctx) + if err != nil { + return "", fmt.Errorf("getting platform information %#v: %w", ctx, err) + } + for _, wantedPlatform := range wantedPlatforms { + for _, d := range list.Manifests { + imagePlatform := imgspecv1.Platform{ + Architecture: d.Platform.Architecture, + OS: d.Platform.OS, + OSVersion: d.Platform.OSVersion, + OSFeatures: slices.Clone(d.Platform.OSFeatures), + Variant: d.Platform.Variant, + } + if platform.MatchesPlatform(imagePlatform, wantedPlatform) { + return d.Digest, nil + } + } + } + return "", fmt.Errorf("no image found in manifest list for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS) +} + +// Serialize returns the list in a blob format. +// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! +func (list *Schema2ListPublic) Serialize() ([]byte, error) { + buf, err := json.Marshal(list) + if err != nil { + return nil, fmt.Errorf("marshaling Schema2List %#v: %w", list, err) + } + return buf, nil +} + +// Schema2ListPublicFromComponents creates a Schema2 manifest list instance from the +// supplied data. 
+// This is publicly visible as c/image/manifest.Schema2ListFromComponents. +func Schema2ListPublicFromComponents(components []Schema2ManifestDescriptor) *Schema2ListPublic { + list := Schema2ListPublic{ + SchemaVersion: 2, + MediaType: DockerV2ListMediaType, + Manifests: make([]Schema2ManifestDescriptor, len(components)), + } + for i, component := range components { + m := Schema2ManifestDescriptor{ + Schema2Descriptor{ + MediaType: component.MediaType, + Size: component.Size, + Digest: component.Digest, + URLs: slices.Clone(component.URLs), + }, + Schema2PlatformSpec{ + Architecture: component.Platform.Architecture, + OS: component.Platform.OS, + OSVersion: component.Platform.OSVersion, + OSFeatures: slices.Clone(component.Platform.OSFeatures), + Variant: component.Platform.Variant, + Features: slices.Clone(component.Platform.Features), + }, + } + list.Manifests[i] = m + } + return &list +} + +// Schema2ListPublicClone creates a deep copy of the passed-in list. +// This is publicly visible as c/image/manifest.Schema2ListClone. +func Schema2ListPublicClone(list *Schema2ListPublic) *Schema2ListPublic { + return Schema2ListPublicFromComponents(list.Manifests) +} + +// ToOCI1Index returns the list encoded as an OCI1 index. +func (list *Schema2ListPublic) ToOCI1Index() (*OCI1IndexPublic, error) { + components := make([]imgspecv1.Descriptor, 0, len(list.Manifests)) + for _, manifest := range list.Manifests { + converted := imgspecv1.Descriptor{ + MediaType: manifest.MediaType, + Size: manifest.Size, + Digest: manifest.Digest, + URLs: slices.Clone(manifest.URLs), + Platform: &imgspecv1.Platform{ + OS: manifest.Platform.OS, + Architecture: manifest.Platform.Architecture, + OSFeatures: slices.Clone(manifest.Platform.OSFeatures), + OSVersion: manifest.Platform.OSVersion, + Variant: manifest.Platform.Variant, + }, + } + components = append(components, converted) + } + oci := OCI1IndexPublicFromComponents(components, nil) + return oci, nil +} + +// ToSchema2List returns the list encoded as a Schema2 list. +func (list *Schema2ListPublic) ToSchema2List() (*Schema2ListPublic, error) { + return Schema2ListPublicClone(list), nil +} + +// Schema2ListPublicFromManifest creates a Schema2 manifest list instance from marshalled +// JSON, presumably generated by encoding a Schema2 manifest list. +// This is publicly visible as c/image/manifest.Schema2ListFromManifest. +func Schema2ListPublicFromManifest(manifest []byte) (*Schema2ListPublic, error) { + list := Schema2ListPublic{ + Manifests: []Schema2ManifestDescriptor{}, + } + if err := json.Unmarshal(manifest, &list); err != nil { + return nil, fmt.Errorf("unmarshaling Schema2List %q: %w", string(manifest), err) + } + if err := ValidateUnambiguousManifestFormat(manifest, DockerV2ListMediaType, + AllowedFieldManifests); err != nil { + return nil, err + } + return &list, nil +} + +// Clone returns a deep copy of this list and its contents. +func (list *Schema2ListPublic) Clone() ListPublic { + return Schema2ListPublicClone(list) +} + +// ConvertToMIMEType converts the passed-in manifest list to a manifest +// list of the specified type. 
+func (list *Schema2ListPublic) ConvertToMIMEType(manifestMIMEType string) (ListPublic, error) { + switch normalized := NormalizedMIMEType(manifestMIMEType); normalized { + case DockerV2ListMediaType: + return list.Clone(), nil + case imgspecv1.MediaTypeImageIndex: + return list.ToOCI1Index() + case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType: + return nil, fmt.Errorf("Can not convert manifest list to MIME type %q, which is not a list type", manifestMIMEType) + default: + // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values. + return nil, fmt.Errorf("Unimplemented manifest list MIME type %s", manifestMIMEType) + } +} + +// Schema2List is a list of platform-specific manifests. +type Schema2List struct { + Schema2ListPublic +} + +func schema2ListFromPublic(public *Schema2ListPublic) *Schema2List { + return &Schema2List{*public} +} + +func (index *Schema2List) CloneInternal() List { + return schema2ListFromPublic(Schema2ListPublicClone(&index.Schema2ListPublic)) +} + +func (index *Schema2List) Clone() ListPublic { + return index.CloneInternal() +} + +// Schema2ListFromManifest creates a Schema2 manifest list instance from marshalled +// JSON, presumably generated by encoding a Schema2 manifest list. +func Schema2ListFromManifest(manifest []byte) (*Schema2List, error) { + public, err := Schema2ListPublicFromManifest(manifest) + if err != nil { + return nil, err + } + return schema2ListFromPublic(public), nil +} diff --git a/vendor/github.com/containers/image/v5/internal/manifest/list.go b/vendor/github.com/containers/image/v5/internal/manifest/list.go new file mode 100644 index 0000000000..6866ab01ca --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/manifest/list.go @@ -0,0 +1,86 @@ +package manifest + +import ( + "fmt" + + "github.com/containers/image/v5/types" + digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ListPublic is a subset of List which is a part of the public API; +// so no methods can be added, removed or changed. +// +// Internal users should usually use List instead. +type ListPublic interface { + // MIMEType returns the MIME type of this particular manifest list. + MIMEType() string + + // Instances returns a list of the manifests that this list knows of, other than its own. + Instances() []digest.Digest + + // Update information about the list's instances. The length of the passed-in slice must + // match the length of the list of instances which the list already contains, and every field + // must be specified. + UpdateInstances([]ListUpdate) error + + // Instance returns the size and MIME type of a particular instance in the list. + Instance(digest.Digest) (ListUpdate, error) + + // ChooseInstance selects which manifest is most appropriate for the platform described by the + // SystemContext, or for the current platform if the SystemContext doesn't specify any details. + ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) + + // Serialize returns the list in a blob format. + // NOTE: Serialize() does not in general reproduce the original blob if this object was loaded + // from, even if no modifications were made! + Serialize() ([]byte, error) + + // ConvertToMIMEType returns the list rebuilt to the specified MIME type, or an error. 
+ ConvertToMIMEType(mimeType string) (ListPublic, error) + + // Clone returns a deep copy of this list and its contents. + Clone() ListPublic +} + +// List is an interface for parsing, modifying lists of image manifests. +// Callers can either use this abstract interface without understanding the details of the formats, +// or instantiate a specific implementation (e.g. manifest.OCI1Index) and access the public members +// directly. +type List interface { + ListPublic + // CloneInternal returns a deep copy of this list and its contents. + CloneInternal() List +} + +// ListUpdate includes the fields which a List's UpdateInstances() method will modify. +// This is publicly visible as c/image/manifest.ListUpdate. +type ListUpdate struct { + Digest digest.Digest + Size int64 + MediaType string +} + +// ListPublicFromBlob parses a list of manifests. +// This is publicly visible as c/image/manifest.ListFromBlob. +func ListPublicFromBlob(manifest []byte, manifestMIMEType string) (ListPublic, error) { + list, err := ListFromBlob(manifest, manifestMIMEType) + if err != nil { + return nil, err + } + return list, nil +} + +// ListFromBlob parses a list of manifests. +func ListFromBlob(manifest []byte, manifestMIMEType string) (List, error) { + normalized := NormalizedMIMEType(manifestMIMEType) + switch normalized { + case DockerV2ListMediaType: + return Schema2ListFromManifest(manifest) + case imgspecv1.MediaTypeImageIndex: + return OCI1IndexFromManifest(manifest) + case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType: + return nil, fmt.Errorf("Treating single images as manifest lists is not implemented") + } + return nil, fmt.Errorf("Unimplemented manifest list MIME type %s (normalized as %s)", manifestMIMEType, normalized) +} diff --git a/vendor/github.com/containers/image/v5/internal/manifest/manifest.go b/vendor/github.com/containers/image/v5/internal/manifest/manifest.go new file mode 100644 index 0000000000..1dbcc14182 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/manifest/manifest.go @@ -0,0 +1,167 @@ +package manifest + +import ( + "encoding/json" + + "github.com/containers/libtrust" + digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +// FIXME: Should we just use docker/distribution and docker/docker implementations directly? + +// FIXME(runcom, mitr): should we have a mediatype pkg?? +const ( + // DockerV2Schema1MediaType MIME type represents Docker manifest schema 1 + DockerV2Schema1MediaType = "application/vnd.docker.distribution.manifest.v1+json" + // DockerV2Schema1MediaType MIME type represents Docker manifest schema 1 with a JWS signature + DockerV2Schema1SignedMediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws" + // DockerV2Schema2MediaType MIME type represents Docker manifest schema 2 + DockerV2Schema2MediaType = "application/vnd.docker.distribution.manifest.v2+json" + // DockerV2Schema2ConfigMediaType is the MIME type used for schema 2 config blobs. + DockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json" + // DockerV2Schema2LayerMediaType is the MIME type used for schema 2 layers. + DockerV2Schema2LayerMediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip" + // DockerV2SchemaLayerMediaTypeUncompressed is the mediaType used for uncompressed layers. 
+	DockerV2SchemaLayerMediaTypeUncompressed = "application/vnd.docker.image.rootfs.diff.tar"
+	// DockerV2ListMediaType MIME type represents Docker manifest schema 2 list
+	DockerV2ListMediaType = "application/vnd.docker.distribution.manifest.list.v2+json"
+	// DockerV2Schema2ForeignLayerMediaType is the MIME type used for schema 2 foreign layers.
+	DockerV2Schema2ForeignLayerMediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar"
+	// DockerV2Schema2ForeignLayerMediaTypeGzip is the MIME type used for gzipped schema 2 foreign layers.
+	DockerV2Schema2ForeignLayerMediaTypeGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
+)
+
+// GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized.
+// FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest,
+// but we may not have such metadata available (e.g. when the manifest is a local file).
+// This is publicly visible as c/image/manifest.GuessMIMEType.
+func GuessMIMEType(manifest []byte) string {
+	// A subset of manifest fields; the rest is silently ignored by json.Unmarshal.
+	// Also docker/distribution/manifest.Versioned.
+	meta := struct {
+		MediaType     string `json:"mediaType"`
+		SchemaVersion int    `json:"schemaVersion"`
+		Signatures    any    `json:"signatures"`
+	}{}
+	if err := json.Unmarshal(manifest, &meta); err != nil {
+		return ""
+	}
+
+	switch meta.MediaType {
+	case DockerV2Schema2MediaType, DockerV2ListMediaType,
+		imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeImageIndex: // A recognized type.
+		return meta.MediaType
+	}
+	// This is the only way the function can return DockerV2Schema1MediaType, and recognizing that is essential for stripping the JWS signatures, i.e. computing the correct manifest digest.
+	switch meta.SchemaVersion {
+	case 1:
+		if meta.Signatures != nil {
+			return DockerV2Schema1SignedMediaType
+		}
+		return DockerV2Schema1MediaType
+	case 2:
+		// Best effort to understand if this is an OCI image since mediaType
+		// wasn't in the manifest for OCI image-spec < 1.0.2.
+		// For docker v2s2 meta.MediaType should have been set. But given the data, this is our best guess.
+		ociMan := struct {
+			Config struct {
+				MediaType string `json:"mediaType"`
+			} `json:"config"`
+		}{}
+		if err := json.Unmarshal(manifest, &ociMan); err != nil {
+			return ""
+		}
+		switch ociMan.Config.MediaType {
+		case imgspecv1.MediaTypeImageConfig:
+			return imgspecv1.MediaTypeImageManifest
+		case DockerV2Schema2ConfigMediaType:
+			// This case should not happen since a Docker image
+			// must declare a top-level media type and
+			// `meta.MediaType` has already been checked.
+			return DockerV2Schema2MediaType
+		}
+		// Maybe an image index or an OCI artifact.
+		ociIndex := struct {
+			Manifests []imgspecv1.Descriptor `json:"manifests"`
+		}{}
+		if err := json.Unmarshal(manifest, &ociIndex); err != nil {
+			return ""
+		}
+		if len(ociIndex.Manifests) != 0 {
+			if ociMan.Config.MediaType == "" {
+				return imgspecv1.MediaTypeImageIndex
+			}
+			// FIXME: this is mixing media types of manifests and configs.
+			return ociMan.Config.MediaType
+		}
+		// It's most likely an OCI artifact with a custom config media
+		// type which is not (and cannot be) covered by the media-type
+		// checks above.
+		return imgspecv1.MediaTypeImageManifest
+	}
+	return ""
+}
+
+// Digest returns the digest of a docker manifest, with any necessary implied transformations like stripping v1s1 signatures.
+// This is publicly visible as c/image/manifest.Digest.
+func Digest(manifest []byte) (digest.Digest, error) { + if GuessMIMEType(manifest) == DockerV2Schema1SignedMediaType { + sig, err := libtrust.ParsePrettySignature(manifest, "signatures") + if err != nil { + return "", err + } + manifest, err = sig.Payload() + if err != nil { + // Coverage: This should never happen, libtrust's Payload() can fail only if joseBase64UrlDecode() fails, on a string + // that libtrust itself has josebase64UrlEncode()d + return "", err + } + } + + return digest.FromBytes(manifest), nil +} + +// MatchesDigest returns true iff the manifest matches expectedDigest. +// Error may be set if this returns false. +// Note that this is not doing ConstantTimeCompare; by the time we get here, the cryptographic signature must already have been verified, +// or we are not using a cryptographic channel and the attacker can modify the digest along with the manifest blob. +// This is publicly visible as c/image/manifest.MatchesDigest. +func MatchesDigest(manifest []byte, expectedDigest digest.Digest) (bool, error) { + // This should eventually support various digest types. + actualDigest, err := Digest(manifest) + if err != nil { + return false, err + } + return expectedDigest == actualDigest, nil +} + +// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server, +// centralizing various workarounds. +// This is publicly visible as c/image/manifest.NormalizedMIMEType. +func NormalizedMIMEType(input string) string { + switch input { + // "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md . + // This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might + // need to happen within the ImageSource. + case "application/json": + return DockerV2Schema1SignedMediaType + case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, + imgspecv1.MediaTypeImageManifest, + imgspecv1.MediaTypeImageIndex, + DockerV2Schema2MediaType, + DockerV2ListMediaType: + return input + default: + // If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time + // to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108 + // and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50 + // + // Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag. + // This makes no real sense, but it happens + // because requests for manifests are + // redirected to a content distribution + // network which is configured that way. 
See https://bugzilla.redhat.com/show_bug.cgi?id=1389442 + return DockerV2Schema1SignedMediaType + } +} diff --git a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go new file mode 100644 index 0000000000..4e8ef62203 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go @@ -0,0 +1,265 @@ +package manifest + +import ( + "encoding/json" + "fmt" + "runtime" + + platform "github.com/containers/image/v5/internal/pkg/platform" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + imgspec "github.com/opencontainers/image-spec/specs-go" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/exp/maps" + "golang.org/x/exp/slices" +) + +// OCI1IndexPublic is just an alias for the OCI index type, but one which we can +// provide methods for. +// This is publicly visible as c/image/manifest.OCI1Index +// Internal users should usually use OCI1Index instead. +type OCI1IndexPublic struct { + imgspecv1.Index +} + +// MIMEType returns the MIME type of this particular manifest index. +func (index *OCI1IndexPublic) MIMEType() string { + return imgspecv1.MediaTypeImageIndex +} + +// Instances returns a slice of digests of the manifests that this index knows of. +func (index *OCI1IndexPublic) Instances() []digest.Digest { + results := make([]digest.Digest, len(index.Manifests)) + for i, m := range index.Manifests { + results[i] = m.Digest + } + return results +} + +// Instance returns the ListUpdate of a particular instance in the index. +func (index *OCI1IndexPublic) Instance(instanceDigest digest.Digest) (ListUpdate, error) { + for _, manifest := range index.Manifests { + if manifest.Digest == instanceDigest { + return ListUpdate{ + Digest: manifest.Digest, + Size: manifest.Size, + MediaType: manifest.MediaType, + }, nil + } + } + return ListUpdate{}, fmt.Errorf("unable to find instance %s in OCI1Index", instanceDigest) +} + +// UpdateInstances updates the sizes, digests, and media types of the manifests +// which the list catalogs. +func (index *OCI1IndexPublic) UpdateInstances(updates []ListUpdate) error { + if len(updates) != len(index.Manifests) { + return fmt.Errorf("incorrect number of update entries passed to OCI1Index.UpdateInstances: expected %d, got %d", len(index.Manifests), len(updates)) + } + for i := range updates { + if err := updates[i].Digest.Validate(); err != nil { + return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances contained an invalid digest: %w", i+1, len(updates), err) + } + index.Manifests[i].Digest = updates[i].Digest + if updates[i].Size < 0 { + return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size) + } + index.Manifests[i].Size = updates[i].Size + if updates[i].MediaType == "" { + return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had no media type (was %q)", i+1, len(updates), index.Manifests[i].MediaType) + } + index.Manifests[i].MediaType = updates[i].MediaType + } + return nil +} + +// ChooseInstance parses blob as an oci v1 manifest index, and returns the digest +// of the image which is appropriate for the current environment. 
+func (index *OCI1IndexPublic) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) { + wantedPlatforms, err := platform.WantedPlatforms(ctx) + if err != nil { + return "", fmt.Errorf("getting platform information %#v: %w", ctx, err) + } + for _, wantedPlatform := range wantedPlatforms { + for _, d := range index.Manifests { + if d.Platform == nil { + continue + } + imagePlatform := imgspecv1.Platform{ + Architecture: d.Platform.Architecture, + OS: d.Platform.OS, + OSVersion: d.Platform.OSVersion, + OSFeatures: slices.Clone(d.Platform.OSFeatures), + Variant: d.Platform.Variant, + } + if platform.MatchesPlatform(imagePlatform, wantedPlatform) { + return d.Digest, nil + } + } + } + + for _, d := range index.Manifests { + if d.Platform == nil { + return d.Digest, nil + } + } + return "", fmt.Errorf("no image found in image index for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS) +} + +// Serialize returns the index in a blob format. +// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! +func (index *OCI1IndexPublic) Serialize() ([]byte, error) { + buf, err := json.Marshal(index) + if err != nil { + return nil, fmt.Errorf("marshaling OCI1Index %#v: %w", index, err) + } + return buf, nil +} + +// OCI1IndexPublicFromComponents creates an OCI1 image index instance from the +// supplied data. +// This is publicly visible as c/image/manifest.OCI1IndexFromComponents. +func OCI1IndexPublicFromComponents(components []imgspecv1.Descriptor, annotations map[string]string) *OCI1IndexPublic { + index := OCI1IndexPublic{ + imgspecv1.Index{ + Versioned: imgspec.Versioned{SchemaVersion: 2}, + MediaType: imgspecv1.MediaTypeImageIndex, + Manifests: make([]imgspecv1.Descriptor, len(components)), + Annotations: maps.Clone(annotations), + }, + } + for i, component := range components { + var platform *imgspecv1.Platform + if component.Platform != nil { + platform = &imgspecv1.Platform{ + Architecture: component.Platform.Architecture, + OS: component.Platform.OS, + OSVersion: component.Platform.OSVersion, + OSFeatures: slices.Clone(component.Platform.OSFeatures), + Variant: component.Platform.Variant, + } + } + m := imgspecv1.Descriptor{ + MediaType: component.MediaType, + Size: component.Size, + Digest: component.Digest, + URLs: slices.Clone(component.URLs), + Annotations: maps.Clone(component.Annotations), + Platform: platform, + } + index.Manifests[i] = m + } + return &index +} + +// OCI1IndexPublicClone creates a deep copy of the passed-in index. +// This is publicly visible as c/image/manifest.OCI1IndexClone. +func OCI1IndexPublicClone(index *OCI1IndexPublic) *OCI1IndexPublic { + return OCI1IndexPublicFromComponents(index.Manifests, index.Annotations) +} + +// ToOCI1Index returns the index encoded as an OCI1 index. +func (index *OCI1IndexPublic) ToOCI1Index() (*OCI1IndexPublic, error) { + return OCI1IndexPublicClone(index), nil +} + +// ToSchema2List returns the index encoded as a Schema2 list. 
+func (index *OCI1IndexPublic) ToSchema2List() (*Schema2ListPublic, error) { + components := make([]Schema2ManifestDescriptor, 0, len(index.Manifests)) + for _, manifest := range index.Manifests { + platform := manifest.Platform + if platform == nil { + platform = &imgspecv1.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + } + } + converted := Schema2ManifestDescriptor{ + Schema2Descriptor{ + MediaType: manifest.MediaType, + Size: manifest.Size, + Digest: manifest.Digest, + URLs: slices.Clone(manifest.URLs), + }, + Schema2PlatformSpec{ + OS: platform.OS, + Architecture: platform.Architecture, + OSFeatures: slices.Clone(platform.OSFeatures), + OSVersion: platform.OSVersion, + Variant: platform.Variant, + }, + } + components = append(components, converted) + } + s2 := Schema2ListPublicFromComponents(components) + return s2, nil +} + +// OCI1IndexPublicFromManifest creates an OCI1 manifest index instance from marshalled +// JSON, presumably generated by encoding a OCI1 manifest index. +// This is publicly visible as c/image/manifest.OCI1IndexFromManifest. +func OCI1IndexPublicFromManifest(manifest []byte) (*OCI1IndexPublic, error) { + index := OCI1IndexPublic{ + Index: imgspecv1.Index{ + Versioned: imgspec.Versioned{SchemaVersion: 2}, + MediaType: imgspecv1.MediaTypeImageIndex, + Manifests: []imgspecv1.Descriptor{}, + Annotations: make(map[string]string), + }, + } + if err := json.Unmarshal(manifest, &index); err != nil { + return nil, fmt.Errorf("unmarshaling OCI1Index %q: %w", string(manifest), err) + } + if err := ValidateUnambiguousManifestFormat(manifest, imgspecv1.MediaTypeImageIndex, + AllowedFieldManifests); err != nil { + return nil, err + } + return &index, nil +} + +// Clone returns a deep copy of this list and its contents. +func (index *OCI1IndexPublic) Clone() ListPublic { + return OCI1IndexPublicClone(index) +} + +// ConvertToMIMEType converts the passed-in image index to a manifest list of +// the specified type. +func (index *OCI1IndexPublic) ConvertToMIMEType(manifestMIMEType string) (ListPublic, error) { + switch normalized := NormalizedMIMEType(manifestMIMEType); normalized { + case DockerV2ListMediaType: + return index.ToSchema2List() + case imgspecv1.MediaTypeImageIndex: + return index.Clone(), nil + case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType: + return nil, fmt.Errorf("Can not convert image index to MIME type %q, which is not a list type", manifestMIMEType) + default: + // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values. + return nil, fmt.Errorf("Unimplemented manifest MIME type %s", manifestMIMEType) + } +} + +type OCI1Index struct { + OCI1IndexPublic +} + +func oci1IndexFromPublic(public *OCI1IndexPublic) *OCI1Index { + return &OCI1Index{*public} +} + +func (index *OCI1Index) CloneInternal() List { + return oci1IndexFromPublic(OCI1IndexPublicClone(&index.OCI1IndexPublic)) +} + +func (index *OCI1Index) Clone() ListPublic { + return index.CloneInternal() +} + +// OCI1IndexFromManifest creates a OCI1 manifest list instance from marshalled +// JSON, presumably generated by encoding a OCI1 manifest list. 
+func OCI1IndexFromManifest(manifest []byte) (*OCI1Index, error) { + public, err := OCI1IndexPublicFromManifest(manifest) + if err != nil { + return nil, err + } + return oci1IndexFromPublic(public), nil +} diff --git a/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go b/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go index 3bda6af291..59b1d4b9e2 100644 --- a/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go +++ b/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go @@ -25,6 +25,7 @@ import ( "github.com/containers/image/v5/types" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/exp/slices" ) // For Linux, the kernel has already detected the ABI, ISA and Features. @@ -152,13 +153,9 @@ func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) { if wantedVariant != "" { // If the user requested a specific variant, we'll walk down // the list from most to least compatible. - if compatibility[wantedArch] != nil { - variantOrder := compatibility[wantedArch] - for i, v := range variantOrder { - if wantedVariant == v { - variants = variantOrder[i:] - break - } + if variantOrder := compatibility[wantedArch]; variantOrder != nil { + if i := slices.Index(variantOrder, wantedVariant); i != -1 { + variants = variantOrder[i:] } } if variants == nil { diff --git a/vendor/github.com/containers/image/v5/internal/set/set.go b/vendor/github.com/containers/image/v5/internal/set/set.go new file mode 100644 index 0000000000..5c7bcabef8 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/set/set.go @@ -0,0 +1,46 @@ +package set + +import "golang.org/x/exp/maps" + +// FIXME: +// - Docstrings +// - This should be in a public library somewhere + +type Set[E comparable] struct { + m map[E]struct{} +} + +func New[E comparable]() *Set[E] { + return &Set[E]{ + m: map[E]struct{}{}, + } +} + +func NewWithValues[E comparable](values ...E) *Set[E] { + s := New[E]() + for _, v := range values { + s.Add(v) + } + return s +} + +func (s Set[E]) Add(v E) { + s.m[v] = struct{}{} // Possibly writing the same struct{}{} presence marker again. 
+} + +func (s Set[E]) Delete(v E) { + delete(s.m, v) +} + +func (s *Set[E]) Contains(v E) bool { + _, ok := s.m[v] + return ok +} + +func (s *Set[E]) Empty() bool { + return len(s.m) == 0 +} + +func (s *Set[E]) Values() []E { + return maps.Keys(s.m) +} diff --git a/vendor/github.com/containers/image/v5/internal/signature/signature.go b/vendor/github.com/containers/image/v5/internal/signature/signature.go index ee90b788b0..25823a50cc 100644 --- a/vendor/github.com/containers/image/v5/internal/signature/signature.go +++ b/vendor/github.com/containers/image/v5/internal/signature/signature.go @@ -66,17 +66,15 @@ func FromBlob(blob []byte) (Signature, error) { // The newer format: binary 0, format name, newline, data case 0x00: blob = blob[1:] - newline := bytes.IndexByte(blob, '\n') - if newline == -1 { + formatBytes, blobChunk, foundNewline := bytes.Cut(blob, []byte{'\n'}) + if !foundNewline { return nil, fmt.Errorf("invalid signature format, missing newline") } - formatBytes := blob[:newline] for _, b := range formatBytes { if b < 32 || b >= 0x7F { return nil, fmt.Errorf("invalid signature format, non-ASCII byte %#x", b) } } - blobChunk := blob[newline+1:] switch { case bytes.Equal(formatBytes, []byte(SimpleSigningFormat)): return SimpleSigningFromBlob(blobChunk), nil @@ -102,10 +100,3 @@ func UnsupportedFormatError(sig Signature) error { return fmt.Errorf("unsupported, and unrecognized, signature format %q", string(formatID)) } } - -// copyByteSlice returns a guaranteed-fresh copy of a byte slice -// Use this to make sure the underlying data is not shared and can’t be unexpectedly modified. -func copyByteSlice(s []byte) []byte { - res := []byte{} - return append(res, s...) -} diff --git a/vendor/github.com/containers/image/v5/internal/signature/sigstore.go b/vendor/github.com/containers/image/v5/internal/signature/sigstore.go index 30aff4c1e8..23c8f7f413 100644 --- a/vendor/github.com/containers/image/v5/internal/signature/sigstore.go +++ b/vendor/github.com/containers/image/v5/internal/signature/sigstore.go @@ -1,6 +1,11 @@ package signature -import "encoding/json" +import ( + "encoding/json" + + "golang.org/x/exp/maps" + "golang.org/x/exp/slices" +) const ( // from sigstore/cosign/pkg/types.SimpleSigningMediaType @@ -40,8 +45,8 @@ type sigstoreJSONRepresentation struct { func SigstoreFromComponents(untrustedMimeType string, untrustedPayload []byte, untrustedAnnotations map[string]string) Sigstore { return Sigstore{ untrustedMIMEType: untrustedMimeType, - untrustedPayload: copyByteSlice(untrustedPayload), - untrustedAnnotations: copyStringMap(untrustedAnnotations), + untrustedPayload: slices.Clone(untrustedPayload), + untrustedAnnotations: maps.Clone(untrustedAnnotations), } } @@ -74,17 +79,9 @@ func (s Sigstore) UntrustedMIMEType() string { return s.untrustedMIMEType } func (s Sigstore) UntrustedPayload() []byte { - return copyByteSlice(s.untrustedPayload) + return slices.Clone(s.untrustedPayload) } func (s Sigstore) UntrustedAnnotations() map[string]string { - return copyStringMap(s.untrustedAnnotations) -} - -func copyStringMap(m map[string]string) map[string]string { - res := map[string]string{} - for k, v := range m { - res[k] = v - } - return res + return maps.Clone(s.untrustedAnnotations) } diff --git a/vendor/github.com/containers/image/v5/internal/signature/simple.go b/vendor/github.com/containers/image/v5/internal/signature/simple.go index 88b8adad03..c093704060 100644 --- 
a/vendor/github.com/containers/image/v5/internal/signature/simple.go +++ b/vendor/github.com/containers/image/v5/internal/signature/simple.go @@ -1,5 +1,7 @@ package signature +import "golang.org/x/exp/slices" + // SimpleSigning is a “simple signing” signature. type SimpleSigning struct { untrustedSignature []byte @@ -8,7 +10,7 @@ type SimpleSigning struct { // SimpleSigningFromBlob converts a “simple signing” signature into a SimpleSigning object. func SimpleSigningFromBlob(blobChunk []byte) SimpleSigning { return SimpleSigning{ - untrustedSignature: copyByteSlice(blobChunk), + untrustedSignature: slices.Clone(blobChunk), } } @@ -19,9 +21,9 @@ func (s SimpleSigning) FormatID() FormatID { // blobChunk returns a representation of signature as a []byte, suitable for long-term storage. // Almost everyone should use signature.Blob() instead. func (s SimpleSigning) blobChunk() ([]byte, error) { - return copyByteSlice(s.untrustedSignature), nil + return slices.Clone(s.untrustedSignature), nil } func (s SimpleSigning) UntrustedSignature() []byte { - return copyByteSlice(s.untrustedSignature) + return slices.Clone(s.untrustedSignature) } diff --git a/vendor/github.com/containers/image/v5/internal/uploadreader/upload_reader.go b/vendor/github.com/containers/image/v5/internal/uploadreader/upload_reader.go index 6aa9ead68f..b95370af76 100644 --- a/vendor/github.com/containers/image/v5/internal/uploadreader/upload_reader.go +++ b/vendor/github.com/containers/image/v5/internal/uploadreader/upload_reader.go @@ -11,7 +11,7 @@ import ( // The net/http package uses a separate goroutine to upload data to a HTTP connection, // and it is possible for the server to return a response (typically an error) before consuming // the full body of the request. In that case http.Client.Do can return with an error while -// the body is still being read — regardless of of the cancellation, if any, of http.Request.Context(). +// the body is still being read — regardless of the cancellation, if any, of http.Request.Context(). // // As a result, any data used/updated by the io.Reader() provided as the request body may be // used/updated even after http.Client.Do returns, causing races. diff --git a/vendor/github.com/containers/image/v5/manifest/common.go b/vendor/github.com/containers/image/v5/manifest/common.go index 5f352acc2f..1bdcf3d305 100644 --- a/vendor/github.com/containers/image/v5/manifest/common.go +++ b/vendor/github.com/containers/image/v5/manifest/common.go @@ -1,7 +1,6 @@ package manifest import ( - "encoding/json" "fmt" compressiontypes "github.com/containers/image/v5/pkg/compression/types" @@ -9,96 +8,6 @@ import ( "github.com/sirupsen/logrus" ) -// dupStringSlice returns a deep copy of a slice of strings, or nil if the -// source slice is empty. -func dupStringSlice(list []string) []string { - if len(list) == 0 { - return nil - } - dup := make([]string, len(list)) - copy(dup, list) - return dup -} - -// dupStringStringMap returns a deep copy of a map[string]string, or nil if the -// passed-in map is nil or has no keys. -func dupStringStringMap(m map[string]string) map[string]string { - if len(m) == 0 { - return nil - } - result := make(map[string]string) - for k, v := range m { - result[k] = v - } - return result -} - -// allowedManifestFields is a bit mask of “essential” manifest fields that validateUnambiguousManifestFormat -// can expect to be present. 
-type allowedManifestFields int - -const ( - allowedFieldConfig allowedManifestFields = 1 << iota - allowedFieldFSLayers - allowedFieldHistory - allowedFieldLayers - allowedFieldManifests - allowedFieldFirstUnusedBit // Keep this at the end! -) - -// validateUnambiguousManifestFormat rejects manifests (incl. multi-arch) that look like more than -// one kind we currently recognize, i.e. if they contain any of the known “essential” format fields -// other than the ones the caller specifically allows. -// expectedMIMEType is used only for diagnostics. -// NOTE: The caller should do the non-heuristic validations (e.g. check for any specified format -// identification/version, or other “magic numbers”) before calling this, to cleanly reject unambiguous -// data that just isn’t what was expected, as opposed to actually ambiguous data. -func validateUnambiguousManifestFormat(manifest []byte, expectedMIMEType string, - allowed allowedManifestFields) error { - if allowed >= allowedFieldFirstUnusedBit { - return fmt.Errorf("internal error: invalid allowedManifestFields value %#v", allowed) - } - // Use a private type to decode, not just a map[string]interface{}, because we want - // to also reject case-insensitive matches (which would be used by Go when really decoding - // the manifest). - // (It is expected that as manifest formats are added or extended over time, more fields will be added - // here.) - detectedFields := struct { - Config interface{} `json:"config"` - FSLayers interface{} `json:"fsLayers"` - History interface{} `json:"history"` - Layers interface{} `json:"layers"` - Manifests interface{} `json:"manifests"` - }{} - if err := json.Unmarshal(manifest, &detectedFields); err != nil { - // The caller was supposed to already validate version numbers, so this should not happen; - // let’s not bother with making this error “nice”. - return err - } - unexpected := []string{} - // Sadly this isn’t easy to automate in Go, without reflection. So, copy&paste. - if detectedFields.Config != nil && (allowed&allowedFieldConfig) == 0 { - unexpected = append(unexpected, "config") - } - if detectedFields.FSLayers != nil && (allowed&allowedFieldFSLayers) == 0 { - unexpected = append(unexpected, "fsLayers") - } - if detectedFields.History != nil && (allowed&allowedFieldHistory) == 0 { - unexpected = append(unexpected, "history") - } - if detectedFields.Layers != nil && (allowed&allowedFieldLayers) == 0 { - unexpected = append(unexpected, "layers") - } - if detectedFields.Manifests != nil && (allowed&allowedFieldManifests) == 0 { - unexpected = append(unexpected, "manifests") - } - if len(unexpected) != 0 { - return fmt.Errorf(`rejecting ambiguous manifest, unexpected fields %#v in supposedly %s`, - unexpected, expectedMIMEType) - } - return nil -} - // layerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos() // method call, into a format suitable for inclusion in a types.ImageInspectInfo structure. 
func layerInfosToStrings(infos []LayerInfo) []string { diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go index 8e260c03db..7b9c4b58fb 100644 --- a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go +++ b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go @@ -8,10 +8,13 @@ import ( "time" "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/manifest" + "github.com/containers/image/v5/internal/set" "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/regexp" "github.com/docker/docker/api/types/versions" "github.com/opencontainers/go-digest" + "golang.org/x/exp/slices" ) // Schema1FSLayers is an entry of the "fsLayers" array in docker/distribution schema 1. @@ -53,16 +56,16 @@ type Schema1V1Compatibility struct { // Schema1FromManifest creates a Schema1 manifest instance from a manifest blob. // (NOTE: The instance is not necessary a literal representation of the original blob, // layers with duplicate IDs are eliminated.) -func Schema1FromManifest(manifest []byte) (*Schema1, error) { +func Schema1FromManifest(manifestBlob []byte) (*Schema1, error) { s1 := Schema1{} - if err := json.Unmarshal(manifest, &s1); err != nil { + if err := json.Unmarshal(manifestBlob, &s1); err != nil { return nil, err } if s1.SchemaVersion != 1 { return nil, fmt.Errorf("unsupported schema version %d", s1.SchemaVersion) } - if err := validateUnambiguousManifestFormat(manifest, DockerV2Schema1SignedMediaType, - allowedFieldFSLayers|allowedFieldHistory); err != nil { + if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, DockerV2Schema1SignedMediaType, + manifest.AllowedFieldFSLayers|manifest.AllowedFieldHistory); err != nil { return nil, err } if err := s1.initialize(); err != nil { @@ -183,22 +186,22 @@ func (m *Schema1) fixManifestLayers() error { return errors.New("Invalid parent ID in the base layer of the image") } // check general duplicates to error instead of a deadlock - idmap := make(map[string]struct{}) + idmap := set.New[string]() var lastID string for _, img := range m.ExtractedV1Compatibility { // skip IDs that appear after each other, we handle those later - if _, exists := idmap[img.ID]; img.ID != lastID && exists { + if img.ID != lastID && idmap.Contains(img.ID) { return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID) } lastID = img.ID - idmap[lastID] = struct{}{} + idmap.Add(lastID) } // backwards loop so that we keep the remaining indexes after removing items for i := len(m.ExtractedV1Compatibility) - 2; i >= 0; i-- { if m.ExtractedV1Compatibility[i].ID == m.ExtractedV1Compatibility[i+1].ID { // repeated ID. remove and continue - m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) - m.History = append(m.History[:i], m.History[i+1:]...) - m.ExtractedV1Compatibility = append(m.ExtractedV1Compatibility[:i], m.ExtractedV1Compatibility[i+1:]...) + m.FSLayers = slices.Delete(m.FSLayers, i, i+1) + m.History = slices.Delete(m.History, i, i+1) + m.ExtractedV1Compatibility = slices.Delete(m.ExtractedV1Compatibility, i, i+1) } else if m.ExtractedV1Compatibility[i].Parent != m.ExtractedV1Compatibility[i+1].ID { return fmt.Errorf("Invalid parent ID. 
Expected %v, got %v", m.ExtractedV1Compatibility[i+1].ID, m.ExtractedV1Compatibility[i].Parent) } diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go index d9eca043be..3c9745dde5 100644 --- a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go +++ b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go @@ -5,6 +5,7 @@ import ( "fmt" "time" + "github.com/containers/image/v5/internal/manifest" compressiontypes "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/image/v5/pkg/strslice" "github.com/containers/image/v5/types" @@ -12,12 +13,7 @@ import ( ) // Schema2Descriptor is a “descriptor” in docker/distribution schema 2. -type Schema2Descriptor struct { - MediaType string `json:"mediaType"` - Size int64 `json:"size"` - Digest digest.Digest `json:"digest"` - URLs []string `json:"urls,omitempty"` -} +type Schema2Descriptor = manifest.Schema2Descriptor // BlobInfoFromSchema2Descriptor returns a types.BlobInfo based on the input schema 2 descriptor. func BlobInfoFromSchema2Descriptor(desc Schema2Descriptor) types.BlobInfo { @@ -159,13 +155,13 @@ type Schema2Image struct { } // Schema2FromManifest creates a Schema2 manifest instance from a manifest blob. -func Schema2FromManifest(manifest []byte) (*Schema2, error) { +func Schema2FromManifest(manifestBlob []byte) (*Schema2, error) { s2 := Schema2{} - if err := json.Unmarshal(manifest, &s2); err != nil { + if err := json.Unmarshal(manifestBlob, &s2); err != nil { return nil, err } - if err := validateUnambiguousManifestFormat(manifest, DockerV2Schema2MediaType, - allowedFieldConfig|allowedFieldLayers); err != nil { + if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, DockerV2Schema2MediaType, + manifest.AllowedFieldConfig|manifest.AllowedFieldLayers); err != nil { return nil, err } // Check manifest's and layers' media types. diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go index 7e4cc51835..c958a3fa3a 100644 --- a/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go +++ b/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go @@ -1,220 +1,32 @@ package manifest import ( - "encoding/json" - "fmt" - - platform "github.com/containers/image/v5/internal/pkg/platform" - "github.com/containers/image/v5/types" - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/containers/image/v5/internal/manifest" ) // Schema2PlatformSpec describes the platform which a particular manifest is // specialized for. -type Schema2PlatformSpec struct { - Architecture string `json:"architecture"` - OS string `json:"os"` - OSVersion string `json:"os.version,omitempty"` - OSFeatures []string `json:"os.features,omitempty"` - Variant string `json:"variant,omitempty"` - Features []string `json:"features,omitempty"` // removed in OCI -} +type Schema2PlatformSpec = manifest.Schema2PlatformSpec // Schema2ManifestDescriptor references a platform-specific manifest. 
-type Schema2ManifestDescriptor struct { - Schema2Descriptor - Platform Schema2PlatformSpec `json:"platform"` -} +type Schema2ManifestDescriptor = manifest.Schema2ManifestDescriptor // Schema2List is a list of platform-specific manifests. -type Schema2List struct { - SchemaVersion int `json:"schemaVersion"` - MediaType string `json:"mediaType"` - Manifests []Schema2ManifestDescriptor `json:"manifests"` -} - -// MIMEType returns the MIME type of this particular manifest list. -func (list *Schema2List) MIMEType() string { - return list.MediaType -} - -// Instances returns a slice of digests of the manifests that this list knows of. -func (list *Schema2List) Instances() []digest.Digest { - results := make([]digest.Digest, len(list.Manifests)) - for i, m := range list.Manifests { - results[i] = m.Digest - } - return results -} - -// Instance returns the ListUpdate of a particular instance in the list. -func (list *Schema2List) Instance(instanceDigest digest.Digest) (ListUpdate, error) { - for _, manifest := range list.Manifests { - if manifest.Digest == instanceDigest { - return ListUpdate{ - Digest: manifest.Digest, - Size: manifest.Size, - MediaType: manifest.MediaType, - }, nil - } - } - return ListUpdate{}, fmt.Errorf("unable to find instance %s passed to Schema2List.Instances", instanceDigest) -} - -// UpdateInstances updates the sizes, digests, and media types of the manifests -// which the list catalogs. -func (list *Schema2List) UpdateInstances(updates []ListUpdate) error { - if len(updates) != len(list.Manifests) { - return fmt.Errorf("incorrect number of update entries passed to Schema2List.UpdateInstances: expected %d, got %d", len(list.Manifests), len(updates)) - } - for i := range updates { - if err := updates[i].Digest.Validate(); err != nil { - return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances contained an invalid digest: %w", i+1, len(updates), err) - } - list.Manifests[i].Digest = updates[i].Digest - if updates[i].Size < 0 { - return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size) - } - list.Manifests[i].Size = updates[i].Size - if updates[i].MediaType == "" { - return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(updates), list.Manifests[i].MediaType) - } - list.Manifests[i].MediaType = updates[i].MediaType - } - return nil -} - -// ChooseInstance parses blob as a schema2 manifest list, and returns the digest -// of the image which is appropriate for the current environment. -func (list *Schema2List) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) { - wantedPlatforms, err := platform.WantedPlatforms(ctx) - if err != nil { - return "", fmt.Errorf("getting platform information %#v: %w", ctx, err) - } - for _, wantedPlatform := range wantedPlatforms { - for _, d := range list.Manifests { - imagePlatform := imgspecv1.Platform{ - Architecture: d.Platform.Architecture, - OS: d.Platform.OS, - OSVersion: d.Platform.OSVersion, - OSFeatures: dupStringSlice(d.Platform.OSFeatures), - Variant: d.Platform.Variant, - } - if platform.MatchesPlatform(imagePlatform, wantedPlatform) { - return d.Digest, nil - } - } - } - return "", fmt.Errorf("no image found in manifest list for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS) -} - -// Serialize returns the list in a blob format. 
-// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! -func (list *Schema2List) Serialize() ([]byte, error) { - buf, err := json.Marshal(list) - if err != nil { - return nil, fmt.Errorf("marshaling Schema2List %#v: %w", list, err) - } - return buf, nil -} +type Schema2List = manifest.Schema2ListPublic // Schema2ListFromComponents creates a Schema2 manifest list instance from the // supplied data. func Schema2ListFromComponents(components []Schema2ManifestDescriptor) *Schema2List { - list := Schema2List{ - SchemaVersion: 2, - MediaType: DockerV2ListMediaType, - Manifests: make([]Schema2ManifestDescriptor, len(components)), - } - for i, component := range components { - m := Schema2ManifestDescriptor{ - Schema2Descriptor{ - MediaType: component.MediaType, - Size: component.Size, - Digest: component.Digest, - URLs: dupStringSlice(component.URLs), - }, - Schema2PlatformSpec{ - Architecture: component.Platform.Architecture, - OS: component.Platform.OS, - OSVersion: component.Platform.OSVersion, - OSFeatures: dupStringSlice(component.Platform.OSFeatures), - Variant: component.Platform.Variant, - Features: dupStringSlice(component.Platform.Features), - }, - } - list.Manifests[i] = m - } - return &list + return manifest.Schema2ListPublicFromComponents(components) } // Schema2ListClone creates a deep copy of the passed-in list. func Schema2ListClone(list *Schema2List) *Schema2List { - return Schema2ListFromComponents(list.Manifests) -} - -// ToOCI1Index returns the list encoded as an OCI1 index. -func (list *Schema2List) ToOCI1Index() (*OCI1Index, error) { - components := make([]imgspecv1.Descriptor, 0, len(list.Manifests)) - for _, manifest := range list.Manifests { - converted := imgspecv1.Descriptor{ - MediaType: manifest.MediaType, - Size: manifest.Size, - Digest: manifest.Digest, - URLs: dupStringSlice(manifest.URLs), - Platform: &imgspecv1.Platform{ - OS: manifest.Platform.OS, - Architecture: manifest.Platform.Architecture, - OSFeatures: dupStringSlice(manifest.Platform.OSFeatures), - OSVersion: manifest.Platform.OSVersion, - Variant: manifest.Platform.Variant, - }, - } - components = append(components, converted) - } - oci := OCI1IndexFromComponents(components, nil) - return oci, nil -} - -// ToSchema2List returns the list encoded as a Schema2 list. -func (list *Schema2List) ToSchema2List() (*Schema2List, error) { - return Schema2ListClone(list), nil + return manifest.Schema2ListPublicClone(list) } // Schema2ListFromManifest creates a Schema2 manifest list instance from marshalled // JSON, presumably generated by encoding a Schema2 manifest list. -func Schema2ListFromManifest(manifest []byte) (*Schema2List, error) { - list := Schema2List{ - Manifests: []Schema2ManifestDescriptor{}, - } - if err := json.Unmarshal(manifest, &list); err != nil { - return nil, fmt.Errorf("unmarshaling Schema2List %q: %w", string(manifest), err) - } - if err := validateUnambiguousManifestFormat(manifest, DockerV2ListMediaType, - allowedFieldManifests); err != nil { - return nil, err - } - return &list, nil -} - -// Clone returns a deep copy of this list and its contents. -func (list *Schema2List) Clone() List { - return Schema2ListClone(list) -} - -// ConvertToMIMEType converts the passed-in manifest list to a manifest -// list of the specified type. 
-func (list *Schema2List) ConvertToMIMEType(manifestMIMEType string) (List, error) { - switch normalized := NormalizedMIMEType(manifestMIMEType); normalized { - case DockerV2ListMediaType: - return list.Clone(), nil - case imgspecv1.MediaTypeImageIndex: - return list.ToOCI1Index() - case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType: - return nil, fmt.Errorf("Can not convert manifest list to MIME type %q, which is not a list type", manifestMIMEType) - default: - // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values. - return nil, fmt.Errorf("Unimplemented manifest list MIME type %s", manifestMIMEType) - } +func Schema2ListFromManifest(manifestBlob []byte) (*Schema2List, error) { + return manifest.Schema2ListPublicFromManifest(manifestBlob) } diff --git a/vendor/github.com/containers/image/v5/manifest/list.go b/vendor/github.com/containers/image/v5/manifest/list.go index 58982597e6..1d6fdc9f56 100644 --- a/vendor/github.com/containers/image/v5/manifest/list.go +++ b/vendor/github.com/containers/image/v5/manifest/list.go @@ -1,10 +1,7 @@ package manifest import ( - "fmt" - - "github.com/containers/image/v5/types" - digest "github.com/opencontainers/go-digest" + "github.com/containers/image/v5/internal/manifest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -21,56 +18,14 @@ var ( // Callers can either use this abstract interface without understanding the details of the formats, // or instantiate a specific implementation (e.g. manifest.OCI1Index) and access the public members // directly. -type List interface { - // MIMEType returns the MIME type of this particular manifest list. - MIMEType() string - - // Instances returns a list of the manifests that this list knows of, other than its own. - Instances() []digest.Digest - - // Update information about the list's instances. The length of the passed-in slice must - // match the length of the list of instances which the list already contains, and every field - // must be specified. - UpdateInstances([]ListUpdate) error - - // Instance returns the size and MIME type of a particular instance in the list. - Instance(digest.Digest) (ListUpdate, error) - - // ChooseInstance selects which manifest is most appropriate for the platform described by the - // SystemContext, or for the current platform if the SystemContext doesn't specify any details. - ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) - - // Serialize returns the list in a blob format. - // NOTE: Serialize() does not in general reproduce the original blob if this object was loaded - // from, even if no modifications were made! - Serialize() ([]byte, error) - - // ConvertToMIMEType returns the list rebuilt to the specified MIME type, or an error. - ConvertToMIMEType(mimeType string) (List, error) - - // Clone returns a deep copy of this list and its contents. - Clone() List -} +type List = manifest.ListPublic // ListUpdate includes the fields which a List's UpdateInstances() method will modify. -type ListUpdate struct { - Digest digest.Digest - Size int64 - MediaType string -} +type ListUpdate = manifest.ListUpdate // ListFromBlob parses a list of manifests. 
-func ListFromBlob(manifest []byte, manifestMIMEType string) (List, error) { - normalized := NormalizedMIMEType(manifestMIMEType) - switch normalized { - case DockerV2ListMediaType: - return Schema2ListFromManifest(manifest) - case imgspecv1.MediaTypeImageIndex: - return OCI1IndexFromManifest(manifest) - case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType: - return nil, fmt.Errorf("Treating single images as manifest lists is not implemented") - } - return nil, fmt.Errorf("Unimplemented manifest list MIME type %s (normalized as %s)", manifestMIMEType, normalized) +func ListFromBlob(manifestBlob []byte, manifestMIMEType string) (List, error) { + return manifest.ListPublicFromBlob(manifestBlob, manifestMIMEType) } // ConvertListToMIMEType converts the passed-in manifest list to a manifest diff --git a/vendor/github.com/containers/image/v5/manifest/manifest.go b/vendor/github.com/containers/image/v5/manifest/manifest.go index 53fc866a78..959aac935e 100644 --- a/vendor/github.com/containers/image/v5/manifest/manifest.go +++ b/vendor/github.com/containers/image/v5/manifest/manifest.go @@ -1,10 +1,9 @@ package manifest import ( - "encoding/json" "fmt" - internalManifest "github.com/containers/image/v5/internal/manifest" + "github.com/containers/image/v5/internal/manifest" "github.com/containers/image/v5/types" "github.com/containers/libtrust" digest "github.com/opencontainers/go-digest" @@ -16,28 +15,28 @@ import ( // FIXME(runcom, mitr): should we have a mediatype pkg?? const ( // DockerV2Schema1MediaType MIME type represents Docker manifest schema 1 - DockerV2Schema1MediaType = "application/vnd.docker.distribution.manifest.v1+json" + DockerV2Schema1MediaType = manifest.DockerV2Schema1MediaType // DockerV2Schema1MediaType MIME type represents Docker manifest schema 1 with a JWS signature - DockerV2Schema1SignedMediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws" + DockerV2Schema1SignedMediaType = manifest.DockerV2Schema1SignedMediaType // DockerV2Schema2MediaType MIME type represents Docker manifest schema 2 - DockerV2Schema2MediaType = "application/vnd.docker.distribution.manifest.v2+json" + DockerV2Schema2MediaType = manifest.DockerV2Schema2MediaType // DockerV2Schema2ConfigMediaType is the MIME type used for schema 2 config blobs. - DockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json" + DockerV2Schema2ConfigMediaType = manifest.DockerV2Schema2ConfigMediaType // DockerV2Schema2LayerMediaType is the MIME type used for schema 2 layers. - DockerV2Schema2LayerMediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip" + DockerV2Schema2LayerMediaType = manifest.DockerV2Schema2LayerMediaType // DockerV2SchemaLayerMediaTypeUncompressed is the mediaType used for uncompressed layers. - DockerV2SchemaLayerMediaTypeUncompressed = "application/vnd.docker.image.rootfs.diff.tar" + DockerV2SchemaLayerMediaTypeUncompressed = manifest.DockerV2SchemaLayerMediaTypeUncompressed // DockerV2ListMediaType MIME type represents Docker manifest schema 2 list - DockerV2ListMediaType = "application/vnd.docker.distribution.manifest.list.v2+json" + DockerV2ListMediaType = manifest.DockerV2ListMediaType // DockerV2Schema2ForeignLayerMediaType is the MIME type used for schema 2 foreign layers. 
- DockerV2Schema2ForeignLayerMediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar" + DockerV2Schema2ForeignLayerMediaType = manifest.DockerV2Schema2ForeignLayerMediaType // DockerV2Schema2ForeignLayerMediaType is the MIME type used for gzipped schema 2 foreign layers. - DockerV2Schema2ForeignLayerMediaTypeGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" + DockerV2Schema2ForeignLayerMediaTypeGzip = manifest.DockerV2Schema2ForeignLayerMediaTypeGzip ) // NonImageArtifactError (detected via errors.As) is used when asking for an image-specific operation // on an object which is not a “container image” in the standard sense (e.g. an OCI artifact) -type NonImageArtifactError = internalManifest.NonImageArtifactError +type NonImageArtifactError = manifest.NonImageArtifactError // SupportedSchema2MediaType checks if the specified string is a supported Docker v2s2 media type. func SupportedSchema2MediaType(m string) error { @@ -102,102 +101,21 @@ type LayerInfo struct { // GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized. // FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest, // but we may not have such metadata available (e.g. when the manifest is a local file). -func GuessMIMEType(manifest []byte) string { - // A subset of manifest fields; the rest is silently ignored by json.Unmarshal. - // Also docker/distribution/manifest.Versioned. - meta := struct { - MediaType string `json:"mediaType"` - SchemaVersion int `json:"schemaVersion"` - Signatures interface{} `json:"signatures"` - }{} - if err := json.Unmarshal(manifest, &meta); err != nil { - return "" - } - - switch meta.MediaType { - case DockerV2Schema2MediaType, DockerV2ListMediaType, - imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeImageIndex: // A recognized type. - return meta.MediaType - } - // this is the only way the function can return DockerV2Schema1MediaType, and recognizing that is essential for stripping the JWS signatures = computing the correct manifest digest. - switch meta.SchemaVersion { - case 1: - if meta.Signatures != nil { - return DockerV2Schema1SignedMediaType - } - return DockerV2Schema1MediaType - case 2: - // Best effort to understand if this is an OCI image since mediaType - // wasn't in the manifest for OCI image-spec < 1.0.2. - // For docker v2s2 meta.MediaType should have been set. But given the data, this is our best guess. - ociMan := struct { - Config struct { - MediaType string `json:"mediaType"` - } `json:"config"` - }{} - if err := json.Unmarshal(manifest, &ociMan); err != nil { - return "" - } - switch ociMan.Config.MediaType { - case imgspecv1.MediaTypeImageConfig: - return imgspecv1.MediaTypeImageManifest - case DockerV2Schema2ConfigMediaType: - // This case should not happen since a Docker image - // must declare a top-level media type and - // `meta.MediaType` has already been checked. - return DockerV2Schema2MediaType - } - // Maybe an image index or an OCI artifact. - ociIndex := struct { - Manifests []imgspecv1.Descriptor `json:"manifests"` - }{} - if err := json.Unmarshal(manifest, &ociIndex); err != nil { - return "" - } - if len(ociIndex.Manifests) != 0 { - if ociMan.Config.MediaType == "" { - return imgspecv1.MediaTypeImageIndex - } - // FIXME: this is mixing media types of manifests and configs. 
- return ociMan.Config.MediaType - } - // It's most likely an OCI artifact with a custom config media - // type which is not (and cannot) be covered by the media-type - // checks cabove. - return imgspecv1.MediaTypeImageManifest - } - return "" +func GuessMIMEType(manifestBlob []byte) string { + return manifest.GuessMIMEType(manifestBlob) } // Digest returns the a digest of a docker manifest, with any necessary implied transformations like stripping v1s1 signatures. -func Digest(manifest []byte) (digest.Digest, error) { - if GuessMIMEType(manifest) == DockerV2Schema1SignedMediaType { - sig, err := libtrust.ParsePrettySignature(manifest, "signatures") - if err != nil { - return "", err - } - manifest, err = sig.Payload() - if err != nil { - // Coverage: This should never happen, libtrust's Payload() can fail only if joseBase64UrlDecode() fails, on a string - // that libtrust itself has josebase64UrlEncode()d - return "", err - } - } - - return digest.FromBytes(manifest), nil +func Digest(manifestBlob []byte) (digest.Digest, error) { + return manifest.Digest(manifestBlob) } // MatchesDigest returns true iff the manifest matches expectedDigest. // Error may be set if this returns false. // Note that this is not doing ConstantTimeCompare; by the time we get here, the cryptographic signature must already have been verified, // or we are not using a cryptographic channel and the attacker can modify the digest along with the manifest blob. -func MatchesDigest(manifest []byte, expectedDigest digest.Digest) (bool, error) { - // This should eventually support various digest types. - actualDigest, err := Digest(manifest) - if err != nil { - return false, err - } - return expectedDigest == actualDigest, nil +func MatchesDigest(manifestBlob []byte, expectedDigest digest.Digest) (bool, error) { + return manifest.MatchesDigest(manifestBlob, expectedDigest) } // AddDummyV2S1Signature adds an JWS signature with a temporary key (i.e. useless) to a v2s1 manifest. @@ -231,30 +149,7 @@ func MIMETypeSupportsEncryption(mimeType string) bool { // NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server, // centralizing various workarounds. func NormalizedMIMEType(input string) string { - switch input { - // "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md . - // This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might - // need to happen within the ImageSource. - case "application/json": - return DockerV2Schema1SignedMediaType - case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, - imgspecv1.MediaTypeImageManifest, - imgspecv1.MediaTypeImageIndex, - DockerV2Schema2MediaType, - DockerV2ListMediaType: - return input - default: - // If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time - // to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108 - // and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50 - // - // Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag. - // This makes no real sense, but it happens - // because requests for manifests are - // redirected to a content distribution - // network which is configured that way. 
See https://bugzilla.redhat.com/show_bug.cgi?id=1389442 - return DockerV2Schema1SignedMediaType - } + return manifest.NormalizedMIMEType(input) } // FromBlob returns a Manifest instance for the specified manifest blob and the corresponding MIME type diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go index 2c52423d9f..1553eec6df 100644 --- a/vendor/github.com/containers/image/v5/manifest/oci.go +++ b/vendor/github.com/containers/image/v5/manifest/oci.go @@ -5,6 +5,7 @@ import ( "fmt" "strings" + "github.com/containers/image/v5/internal/manifest" internalManifest "github.com/containers/image/v5/internal/manifest" compressiontypes "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/image/v5/types" @@ -12,6 +13,7 @@ import ( "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/specs-go" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/exp/slices" ) // BlobInfoFromOCI1Descriptor returns a types.BlobInfo based on the input OCI1 descriptor. @@ -49,13 +51,13 @@ func SupportedOCI1MediaType(m string) error { } // OCI1FromManifest creates an OCI1 manifest instance from a manifest blob. -func OCI1FromManifest(manifest []byte) (*OCI1, error) { +func OCI1FromManifest(manifestBlob []byte) (*OCI1, error) { oci1 := OCI1{} - if err := json.Unmarshal(manifest, &oci1); err != nil { + if err := json.Unmarshal(manifestBlob, &oci1); err != nil { return nil, err } - if err := validateUnambiguousManifestFormat(manifest, imgspecv1.MediaTypeImageIndex, - allowedFieldConfig|allowedFieldLayers); err != nil { + if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, imgspecv1.MediaTypeImageIndex, + manifest.AllowedFieldConfig|manifest.AllowedFieldLayers); err != nil { return nil, err } return &oci1, nil @@ -160,10 +162,8 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { // getEncryptedMediaType will return the mediatype to its encrypted counterpart and return // an error if the mediatype does not support encryption func getEncryptedMediaType(mediatype string) (string, error) { - for _, s := range strings.Split(mediatype, "+")[1:] { - if s == "encrypted" { - return "", fmt.Errorf("unsupported mediaType: %v already encrypted", mediatype) - } + if slices.Contains(strings.Split(mediatype, "+")[1:], "encrypted") { + return "", fmt.Errorf("unsupported mediaType: %v already encrypted", mediatype) } unsuffixedMediatype := strings.Split(mediatype, "+")[0] switch unsuffixedMediatype { @@ -178,7 +178,7 @@ func getEncryptedMediaType(mediatype string) (string, error) { // an error if the mediatype does not support decryption func getDecryptedMediaType(mediatype string) (string, error) { if !strings.HasSuffix(mediatype, "+encrypted") { - return "", fmt.Errorf("unsupported mediaType to decrypt %v:", mediatype) + return "", fmt.Errorf("unsupported mediaType to decrypt: %v", mediatype) } return strings.TrimSuffix(mediatype, "+encrypted"), nil diff --git a/vendor/github.com/containers/image/v5/manifest/oci_index.go b/vendor/github.com/containers/image/v5/manifest/oci_index.go index 726207b9d4..193b08935a 100644 --- a/vendor/github.com/containers/image/v5/manifest/oci_index.go +++ b/vendor/github.com/containers/image/v5/manifest/oci_index.go @@ -1,232 +1,27 @@ package manifest import ( - "encoding/json" - "fmt" - "runtime" - - platform 
"github.com/containers/image/v5/internal/pkg/platform" - "github.com/containers/image/v5/types" - "github.com/opencontainers/go-digest" - imgspec "github.com/opencontainers/image-spec/specs-go" + "github.com/containers/image/v5/internal/manifest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" ) // OCI1Index is just an alias for the OCI index type, but one which we can // provide methods for. -type OCI1Index struct { - imgspecv1.Index -} - -// MIMEType returns the MIME type of this particular manifest index. -func (index *OCI1Index) MIMEType() string { - return imgspecv1.MediaTypeImageIndex -} - -// Instances returns a slice of digests of the manifests that this index knows of. -func (index *OCI1Index) Instances() []digest.Digest { - results := make([]digest.Digest, len(index.Manifests)) - for i, m := range index.Manifests { - results[i] = m.Digest - } - return results -} - -// Instance returns the ListUpdate of a particular instance in the index. -func (index *OCI1Index) Instance(instanceDigest digest.Digest) (ListUpdate, error) { - for _, manifest := range index.Manifests { - if manifest.Digest == instanceDigest { - return ListUpdate{ - Digest: manifest.Digest, - Size: manifest.Size, - MediaType: manifest.MediaType, - }, nil - } - } - return ListUpdate{}, fmt.Errorf("unable to find instance %s in OCI1Index", instanceDigest) -} - -// UpdateInstances updates the sizes, digests, and media types of the manifests -// which the list catalogs. -func (index *OCI1Index) UpdateInstances(updates []ListUpdate) error { - if len(updates) != len(index.Manifests) { - return fmt.Errorf("incorrect number of update entries passed to OCI1Index.UpdateInstances: expected %d, got %d", len(index.Manifests), len(updates)) - } - for i := range updates { - if err := updates[i].Digest.Validate(); err != nil { - return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances contained an invalid digest: %w", i+1, len(updates), err) - } - index.Manifests[i].Digest = updates[i].Digest - if updates[i].Size < 0 { - return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size) - } - index.Manifests[i].Size = updates[i].Size - if updates[i].MediaType == "" { - return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had no media type (was %q)", i+1, len(updates), index.Manifests[i].MediaType) - } - index.Manifests[i].MediaType = updates[i].MediaType - } - return nil -} - -// ChooseInstance parses blob as an oci v1 manifest index, and returns the digest -// of the image which is appropriate for the current environment. 
-func (index *OCI1Index) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) { - wantedPlatforms, err := platform.WantedPlatforms(ctx) - if err != nil { - return "", fmt.Errorf("getting platform information %#v: %w", ctx, err) - } - for _, wantedPlatform := range wantedPlatforms { - for _, d := range index.Manifests { - if d.Platform == nil { - continue - } - imagePlatform := imgspecv1.Platform{ - Architecture: d.Platform.Architecture, - OS: d.Platform.OS, - OSVersion: d.Platform.OSVersion, - OSFeatures: dupStringSlice(d.Platform.OSFeatures), - Variant: d.Platform.Variant, - } - if platform.MatchesPlatform(imagePlatform, wantedPlatform) { - return d.Digest, nil - } - } - } - - for _, d := range index.Manifests { - if d.Platform == nil { - return d.Digest, nil - } - } - return "", fmt.Errorf("no image found in image index for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS) -} - -// Serialize returns the index in a blob format. -// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! -func (index *OCI1Index) Serialize() ([]byte, error) { - buf, err := json.Marshal(index) - if err != nil { - return nil, fmt.Errorf("marshaling OCI1Index %#v: %w", index, err) - } - return buf, nil -} +type OCI1Index = manifest.OCI1IndexPublic // OCI1IndexFromComponents creates an OCI1 image index instance from the // supplied data. func OCI1IndexFromComponents(components []imgspecv1.Descriptor, annotations map[string]string) *OCI1Index { - index := OCI1Index{ - imgspecv1.Index{ - Versioned: imgspec.Versioned{SchemaVersion: 2}, - MediaType: imgspecv1.MediaTypeImageIndex, - Manifests: make([]imgspecv1.Descriptor, len(components)), - Annotations: dupStringStringMap(annotations), - }, - } - for i, component := range components { - var platform *imgspecv1.Platform - if component.Platform != nil { - platform = &imgspecv1.Platform{ - Architecture: component.Platform.Architecture, - OS: component.Platform.OS, - OSVersion: component.Platform.OSVersion, - OSFeatures: dupStringSlice(component.Platform.OSFeatures), - Variant: component.Platform.Variant, - } - } - m := imgspecv1.Descriptor{ - MediaType: component.MediaType, - Size: component.Size, - Digest: component.Digest, - URLs: dupStringSlice(component.URLs), - Annotations: dupStringStringMap(component.Annotations), - Platform: platform, - } - index.Manifests[i] = m - } - return &index + return manifest.OCI1IndexPublicFromComponents(components, annotations) } // OCI1IndexClone creates a deep copy of the passed-in index. func OCI1IndexClone(index *OCI1Index) *OCI1Index { - return OCI1IndexFromComponents(index.Manifests, index.Annotations) -} - -// ToOCI1Index returns the index encoded as an OCI1 index. -func (index *OCI1Index) ToOCI1Index() (*OCI1Index, error) { - return OCI1IndexClone(index), nil -} - -// ToSchema2List returns the index encoded as a Schema2 list. 
-func (index *OCI1Index) ToSchema2List() (*Schema2List, error) { - components := make([]Schema2ManifestDescriptor, 0, len(index.Manifests)) - for _, manifest := range index.Manifests { - platform := manifest.Platform - if platform == nil { - platform = &imgspecv1.Platform{ - OS: runtime.GOOS, - Architecture: runtime.GOARCH, - } - } - converted := Schema2ManifestDescriptor{ - Schema2Descriptor{ - MediaType: manifest.MediaType, - Size: manifest.Size, - Digest: manifest.Digest, - URLs: dupStringSlice(manifest.URLs), - }, - Schema2PlatformSpec{ - OS: platform.OS, - Architecture: platform.Architecture, - OSFeatures: dupStringSlice(platform.OSFeatures), - OSVersion: platform.OSVersion, - Variant: platform.Variant, - }, - } - components = append(components, converted) - } - s2 := Schema2ListFromComponents(components) - return s2, nil + return manifest.OCI1IndexPublicClone(index) } // OCI1IndexFromManifest creates an OCI1 manifest index instance from marshalled // JSON, presumably generated by encoding a OCI1 manifest index. -func OCI1IndexFromManifest(manifest []byte) (*OCI1Index, error) { - index := OCI1Index{ - Index: imgspecv1.Index{ - Versioned: imgspec.Versioned{SchemaVersion: 2}, - MediaType: imgspecv1.MediaTypeImageIndex, - Manifests: []imgspecv1.Descriptor{}, - Annotations: make(map[string]string), - }, - } - if err := json.Unmarshal(manifest, &index); err != nil { - return nil, fmt.Errorf("unmarshaling OCI1Index %q: %w", string(manifest), err) - } - if err := validateUnambiguousManifestFormat(manifest, imgspecv1.MediaTypeImageIndex, - allowedFieldManifests); err != nil { - return nil, err - } - return &index, nil -} - -// Clone returns a deep copy of this list and its contents. -func (index *OCI1Index) Clone() List { - return OCI1IndexClone(index) -} - -// ConvertToMIMEType converts the passed-in image index to a manifest list of -// the specified type. -func (index *OCI1Index) ConvertToMIMEType(manifestMIMEType string) (List, error) { - switch normalized := NormalizedMIMEType(manifestMIMEType); normalized { - case DockerV2ListMediaType: - return index.ToSchema2List() - case imgspecv1.MediaTypeImageIndex: - return index.Clone(), nil - case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType: - return nil, fmt.Errorf("Can not convert image index to MIME type %q, which is not a list type", manifestMIMEType) - default: - // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values. 
- return nil, fmt.Errorf("Unimplemented manifest MIME type %s", manifestMIMEType) - } +func OCI1IndexFromManifest(manifestBlob []byte) (*OCI1Index, error) { + return manifest.OCI1IndexPublicFromManifest(manifestBlob) } diff --git a/vendor/github.com/containers/image/v5/oci/internal/oci_util.go b/vendor/github.com/containers/image/v5/oci/internal/oci_util.go index 148bc12fa3..53827b11af 100644 --- a/vendor/github.com/containers/image/v5/oci/internal/oci_util.go +++ b/vendor/github.com/containers/image/v5/oci/internal/oci_util.go @@ -58,13 +58,7 @@ func splitPathAndImageWindows(reference string) (string, string) { } func splitPathAndImageNonWindows(reference string) (string, string) { - sep := strings.SplitN(reference, ":", 2) - path := sep[0] - - var image string - if len(sep) == 2 { - image = sep[1] - } + path, image, _ := strings.Cut(reference, ":") // image is set to "" if there is no ":" return path, image } diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go index 4face7213c..4e4433f12b 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go @@ -12,9 +12,9 @@ import ( "github.com/containers/image/v5/internal/imagedestination/impl" "github.com/containers/image/v5/internal/imagedestination/stubs" + "github.com/containers/image/v5/internal/manifest" "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/internal/putblobdigest" - "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" digest "github.com/opencontainers/go-digest" imgspec "github.com/opencontainers/image-spec/specs-go" diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go index 408af20a42..817a4e40d0 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go @@ -12,8 +12,8 @@ import ( "github.com/containers/image/v5/internal/imagesource/impl" "github.com/containers/image/v5/internal/imagesource/stubs" + "github.com/containers/image/v5/internal/manifest" "github.com/containers/image/v5/internal/private" - "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/tlsclientconfig" "github.com/containers/image/v5/types" "github.com/docker/go-connections/tlsconfig" diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go index 5375d3324e..4a4ab9b2c6 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go @@ -100,7 +100,7 @@ func (ref ociReference) Transport() types.ImageTransport { // StringWithinTransport returns a string representation of the reference, which MUST be such that // reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. // NOTE: The returned string is not promised to be equal to the original input to ParseReference; -// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. +// e.g. 
default attribute values omitted by the user may be filled in the return value, or vice versa. // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. func (ref ociReference) StringWithinTransport() string { return fmt.Sprintf("%s:%s", ref.dir, ref.image) diff --git a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go index 325d6ee5ff..b12a929560 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go @@ -8,6 +8,7 @@ import ( "fmt" "net" "net/http" + "net/netip" "net/url" "os" "path" @@ -19,6 +20,7 @@ import ( "github.com/containers/storage/pkg/homedir" "github.com/imdario/mergo" "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" "golang.org/x/net/http2" "gopkg.in/yaml.v3" ) @@ -205,10 +207,7 @@ func (config *directClientConfig) ClientConfig() (*restConfig, error) { if isConfigTransportTLS(*clientConfig) { var err error // REMOVED: Support for interactive fallback. - userAuthPartialConfig, err := getUserIdentificationPartialConfig(configAuthInfo) - if err != nil { - return nil, err - } + userAuthPartialConfig := getUserIdentificationPartialConfig(configAuthInfo) if err = mergo.MergeWithOverwrite(clientConfig, userAuthPartialConfig); err != nil { return nil, err } @@ -255,7 +254,7 @@ func getServerIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo, conf // 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority) // 3. if there is not enough information to identify the user, load try the ~/.kubernetes_auth file // 4. if there is not enough information to identify the user, prompt if possible -func getUserIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo) (*restConfig, error) { +func getUserIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo) *restConfig { mergedConfig := &restConfig{} // blindly overwrite existing values based on precedence @@ -274,7 +273,7 @@ func getUserIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo) (*rest } // REMOVED: prompting for missing information. - return mergedConfig, nil + return mergedConfig } // ConfirmUsable is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.ConfirmUsable. 
@@ -873,11 +872,11 @@ func newProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error noProxyEnv := os.Getenv("NO_PROXY") noProxyRules := strings.Split(noProxyEnv, ",") - cidrs := []*net.IPNet{} + cidrs := []netip.Prefix{} for _, noProxyRule := range noProxyRules { - _, cidr, _ := net.ParseCIDR(noProxyRule) - if cidr != nil { - cidrs = append(cidrs, cidr) + prefix, err := netip.ParsePrefix(noProxyRule) + if err == nil { + cidrs = append(cidrs, prefix) } } @@ -888,7 +887,7 @@ func newProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error return func(req *http.Request) (*url.URL, error) { host := req.URL.Host // for some urls, the Host is already the host, not the host:port - if net.ParseIP(host) == nil { + if _, err := netip.ParseAddr(host); err != nil { var err error host, _, err = net.SplitHostPort(req.URL.Host) if err != nil { @@ -896,15 +895,15 @@ func newProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error } } - ip := net.ParseIP(host) - if ip == nil { + ip, err := netip.ParseAddr(host) + if err != nil { return delegate(req) } - for _, cidr := range cidrs { - if cidr.Contains(ip) { - return nil, nil - } + if slices.ContainsFunc(cidrs, func(cidr netip.Prefix) bool { + return cidr.Contains(ip) + }) { + return nil, nil } return delegate(req) diff --git a/vendor/github.com/containers/image/v5/openshift/openshift.go b/vendor/github.com/containers/image/v5/openshift/openshift.go index 38f5d531d2..f3d5662e66 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift.go @@ -146,11 +146,11 @@ func (c *openshiftClient) getImage(ctx context.Context, imageStreamImageName str // convertDockerImageReference takes an image API DockerImageReference value and returns a reference we can actually use; // currently OpenShift stores the cluster-internal service IPs here, which are unusable from the outside. func (c *openshiftClient) convertDockerImageReference(ref string) (string, error) { - parts := strings.SplitN(ref, "/", 2) - if len(parts) != 2 { + _, repo, gotRepo := strings.Cut(ref, "/") + if !gotRepo { return "", fmt.Errorf("Invalid format of docker reference %s: missing '/'", ref) } - return reference.Domain(c.ref.dockerReference) + "/" + parts[1], nil + return reference.Domain(c.ref.dockerReference) + "/" + repo, nil } // These structs are subsets of github.com/openshift/origin/pkg/image/api/v1 and its dependencies. 
diff --git a/vendor/github.com/containers/image/v5/openshift/openshift_dest.go b/vendor/github.com/containers/image/v5/openshift/openshift_dest.go index d5dbaf27eb..92aec0266c 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift_dest.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift_dest.go @@ -17,10 +17,12 @@ import ( "github.com/containers/image/v5/internal/imagedestination/impl" "github.com/containers/image/v5/internal/imagedestination/stubs" "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/set" "github.com/containers/image/v5/internal/signature" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" + "golang.org/x/exp/slices" ) type openshiftImageDestination struct { @@ -180,12 +182,11 @@ func (d *openshiftImageDestination) PutSignaturesWithFormat(ctx context.Context, if err != nil { return err } - existingSigNames := map[string]struct{}{} + existingSigNames := set.New[string]() for _, sig := range image.Signatures { - existingSigNames[sig.objectMeta.Name] = struct{}{} + existingSigNames.Add(sig.objectMeta.Name) } -sigExists: for _, newSigWithFormat := range signatures { newSigSimple, ok := newSigWithFormat.(signature.SimpleSigning) if !ok { @@ -193,10 +194,10 @@ sigExists: } newSig := newSigSimple.UntrustedSignature() - for _, existingSig := range image.Signatures { - if existingSig.Type == imageSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) { - continue sigExists - } + if slices.ContainsFunc(image.Signatures, func(existingSig imageSignature) bool { + return existingSig.Type == imageSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) + }) { + continue } // The API expect us to invent a new unique name. This is racy, but hopefully good enough. @@ -208,7 +209,7 @@ sigExists: return fmt.Errorf("generating random signature len %d: %w", n, err) } signatureName = fmt.Sprintf("%s@%032x", imageStreamImageName, randBytes) - if _, ok := existingSigNames[signatureName]; !ok { + if !existingSigNames.Contains(signatureName) { break } } diff --git a/vendor/github.com/containers/image/v5/openshift/openshift_transport.go b/vendor/github.com/containers/image/v5/openshift/openshift_transport.go index 1599039445..0ba435d560 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift_transport.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift_transport.go @@ -89,7 +89,7 @@ func (ref openshiftReference) Transport() types.ImageTransport { // StringWithinTransport returns a string representation of the reference, which MUST be such that // reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. // NOTE: The returned string is not promised to be equal to the original input to ParseReference; -// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. +// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa. // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. 
func (ref openshiftReference) StringWithinTransport() string { return reference.FamiliarString(ref.dockerReference) diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_transport.go b/vendor/github.com/containers/image/v5/ostree/ostree_transport.go index 14a84414ab..d83f85b90f 100644 --- a/vendor/github.com/containers/image/v5/ostree/ostree_transport.go +++ b/vendor/github.com/containers/image/v5/ostree/ostree_transport.go @@ -75,12 +75,11 @@ type ostreeImageCloser struct { func (t ostreeTransport) ParseReference(ref string) (types.ImageReference, error) { var repo = "" - var image = "" - s := strings.SplitN(ref, "@/", 2) - if len(s) == 1 { - image, repo = s[0], defaultOSTreeRepo + image, repoPart, gotRepoPart := strings.Cut(ref, "@/") + if !gotRepoPart { + repo = defaultOSTreeRepo } else { - image, repo = s[0], "/"+s[1] + repo = "/" + repoPart } return NewReference(image, repo) @@ -134,7 +133,7 @@ func (ref ostreeReference) Transport() types.ImageTransport { // StringWithinTransport returns a string representation of the reference, which MUST be such that // reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. // NOTE: The returned string is not promised to be equal to the original input to ParseReference; -// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. +// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa. // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. func (ref ostreeReference) StringWithinTransport() string { return fmt.Sprintf("%s@%s", ref.image, ref.repo) @@ -157,11 +156,11 @@ func (ref ostreeReference) PolicyConfigurationIdentity() string { // It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), // and each following element to be a prefix of the element preceding it. func (ref ostreeReference) PolicyConfigurationNamespaces() []string { - s := strings.SplitN(ref.image, ":", 2) - if len(s) != 2 { // Coverage: Should never happen, NewReference above ensures ref.image has a :tag. + repo, _, gotTag := strings.Cut(ref.image, ":") + if !gotTag { // Coverage: Should never happen, NewReference above ensures ref.image has a :tag. 
panic(fmt.Sprintf("Internal inconsistency: ref.image value %q does not have a :tag", ref.image)) } - name := s[0] + name := repo res := []string{} for { res = append(res, fmt.Sprintf("%s:%s", ref.repo, name)) diff --git a/vendor/github.com/containers/image/v5/pkg/blobcache/src.go b/vendor/github.com/containers/image/v5/pkg/blobcache/src.go index 8cd1bd0e11..e94809753f 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobcache/src.go +++ b/vendor/github.com/containers/image/v5/pkg/blobcache/src.go @@ -10,9 +10,9 @@ import ( "github.com/containers/image/v5/internal/image" "github.com/containers/image/v5/internal/imagesource" "github.com/containers/image/v5/internal/imagesource/impl" + "github.com/containers/image/v5/internal/manifest" "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/internal/signature" - "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/compression" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" @@ -207,7 +207,7 @@ func streamChunksFromFile(streams chan io.ReadCloser, errs chan error, file io.R break } s := signalCloseReader{ - closed: make(chan interface{}), + closed: make(chan struct{}), stream: io.LimitReader(file, int64(c.Length)), } streams <- s @@ -218,7 +218,7 @@ func streamChunksFromFile(streams chan io.ReadCloser, errs chan error, file io.R } type signalCloseReader struct { - closed chan interface{} + closed chan struct{} stream io.Reader } diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go index 426640366f..427610fab0 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go @@ -6,6 +6,7 @@ import ( "time" "github.com/containers/image/v5/internal/blobinfocache" + "github.com/containers/image/v5/internal/set" "github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize" "github.com/containers/image/v5/types" digest "github.com/opencontainers/go-digest" @@ -19,12 +20,12 @@ type locationKey struct { blobDigest digest.Digest } -// cache implements an in-memory-only BlobInfoCache +// cache implements an in-memory-only BlobInfoCache. type cache struct { mutex sync.Mutex // The following fields can only be accessed with mutex held. 
uncompressedDigests map[digest.Digest]digest.Digest - digestsByUncompressed map[digest.Digest]map[digest.Digest]struct{} // stores a set of digests for each uncompressed digest + digestsByUncompressed map[digest.Digest]*set.Set[digest.Digest] // stores a set of digests for each uncompressed digest knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference compressors map[digest.Digest]string // stores a compressor name, or blobinfocache.Unknown, for each digest } @@ -44,7 +45,7 @@ func New() types.BlobInfoCache { func new2() *cache { return &cache{ uncompressedDigests: map[digest.Digest]digest.Digest{}, - digestsByUncompressed: map[digest.Digest]map[digest.Digest]struct{}{}, + digestsByUncompressed: map[digest.Digest]*set.Set[digest.Digest]{}, knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{}, compressors: map[digest.Digest]string{}, } @@ -67,7 +68,7 @@ func (mem *cache) uncompressedDigestLocked(anyDigest digest.Digest) digest.Diges // Presence in digestsByUncompressed implies that anyDigest must already refer to an uncompressed digest. // This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings // when we already record a (compressed, uncompressed) pair. - if m, ok := mem.digestsByUncompressed[anyDigest]; ok && len(m) > 0 { + if s, ok := mem.digestsByUncompressed[anyDigest]; ok && !s.Empty() { return anyDigest } return "" @@ -88,10 +89,10 @@ func (mem *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompre anyDigestSet, ok := mem.digestsByUncompressed[uncompressed] if !ok { - anyDigestSet = map[digest.Digest]struct{}{} + anyDigestSet = set.New[digest.Digest]() mem.digestsByUncompressed[uncompressed] = anyDigestSet } - anyDigestSet[anyDigest] = struct{}{} // Possibly writing the same struct{}{} presence marker again. + anyDigestSet.Add(anyDigest) } // RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, @@ -171,10 +172,11 @@ func (mem *cache) candidateLocations(transport types.ImageTransport, scope types var uncompressedDigest digest.Digest // = "" if canSubstitute { if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" { - otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map - for d := range otherDigests { - if d != primaryDigest && d != uncompressedDigest { - res = mem.appendReplacementCandidates(res, transport, scope, d, requireCompressionInfo) + if otherDigests, ok := mem.digestsByUncompressed[uncompressedDigest]; ok { + for _, d := range otherDigests.Values() { + if d != primaryDigest && d != uncompressedDigest { + res = mem.appendReplacementCandidates(res, transport, scope, d, requireCompressionInfo) + } } } if uncompressedDigest != primaryDigest { diff --git a/vendor/github.com/containers/image/v5/pkg/compression/compression.go b/vendor/github.com/containers/image/v5/pkg/compression/compression.go index ce688d1170..cd9f8593f5 100644 --- a/vendor/github.com/containers/image/v5/pkg/compression/compression.go +++ b/vendor/github.com/containers/image/v5/pkg/compression/compression.go @@ -30,7 +30,7 @@ var ( // Zstd compression. Zstd = internal.NewAlgorithm(types.ZstdAlgorithmName, types.ZstdAlgorithmName, []byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, zstdCompressor) - // Zstd:chunked compression. 
+ // ZstdChunked is a Zstd compression with chunk metadata which allows random access to individual files. ZstdChunked = internal.NewAlgorithm(types.ZstdChunkedAlgorithmName, types.ZstdAlgorithmName, /* Note: InternalUnstableUndocumentedMIMEQuestionMark is not ZstdChunkedAlgorithmName */ nil, ZstdDecompressor, compressor.ZstdCompressor) diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go index c363cb535b..0e3003cecb 100644 --- a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go +++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go @@ -12,6 +12,7 @@ import ( "strings" "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/set" "github.com/containers/image/v5/pkg/sysregistriesv2" "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/homedir" @@ -139,10 +140,7 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon // possible sources, and then call `GetCredentials` on them. That // prevents us from having to reverse engineer the logic in // `GetCredentials`. - allKeys := make(map[string]bool) - addKey := func(s string) { - allKeys[s] = true - } + allKeys := set.New[string]() // To use GetCredentials, we must at least convert the URL forms into host names. // While we're at it, we’ll also canonicalize docker.io to the standard format. @@ -166,14 +164,14 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon // direct mapping to a registry, so we can just // walk the map. for registry := range auths.CredHelpers { - addKey(registry) + allKeys.Add(registry) } for key := range auths.AuthConfigs { key := normalizeAuthFileKey(key, path.legacyFormat) if key == normalizedDockerIORegistry { key = "docker.io" } - addKey(key) + allKeys.Add(key) } } // External helpers. @@ -188,7 +186,7 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon } } for registry := range creds { - addKey(registry) + allKeys.Add(registry) } } } @@ -196,7 +194,7 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon // Now use `GetCredentials` to the specific auth configs for each // previously listed registry. authConfigs := make(map[string]types.DockerAuthConfig) - for key := range allKeys { + for _, key := range allKeys.Values() { authConf, err := GetCredentials(sys, key) if err != nil { // Note: we rely on the logging in `GetCredentials`.
@@ -394,17 +392,16 @@ func RemoveAuthentication(sys *types.SystemContext, key string) error { if isNamespaced { logrus.Debugf("Not removing credentials because namespaced keys are not supported for the credential helper: %s", helper) return - } else { - err := deleteAuthFromCredHelper(helper, key) - if err == nil { - logrus.Debugf("Credentials for %q were deleted from credential helper %s", key, helper) - isLoggedIn = true - return - } - if credentials.IsErrCredentialsNotFoundMessage(err.Error()) { - logrus.Debugf("Not logged in to %s with credential helper %s", key, helper) - return - } + } + err := deleteAuthFromCredHelper(helper, key) + if err == nil { + logrus.Debugf("Credentials for %q were deleted from credential helper %s", key, helper) + isLoggedIn = true + return + } + if credentials.IsErrCredentialsNotFoundMessage(err.Error()) { + logrus.Debugf("Not logged in to %s with credential helper %s", key, helper) + return } multiErr = multierror.Append(multiErr, fmt.Errorf("removing credentials for %s from credential helper %s: %w", key, helper, err)) } @@ -759,8 +756,8 @@ func decodeDockerAuth(path, key string, conf dockerAuthConfig) (types.DockerAuth return types.DockerAuthConfig{}, err } - parts := strings.SplitN(string(decoded), ":", 2) - if len(parts) != 2 { + user, passwordPart, valid := strings.Cut(string(decoded), ":") + if !valid { // if it's invalid just skip, as docker does if len(decoded) > 0 { // Docker writes "auths": { "$host": {} } entries if a credential helper is used, don’t warn about those logrus.Warnf(`Error parsing the "auth" field of a credential entry %q in %q, missing semicolon`, key, path) // Don’t include the text of decoded, because that might put secrets into a log. @@ -770,8 +767,7 @@ func decodeDockerAuth(path, key string, conf dockerAuthConfig) (types.DockerAuth return types.DockerAuthConfig{}, nil } - user := parts[0] - password := strings.Trim(parts[1], "\x00") + password := strings.Trim(passwordPart, "\x00") return types.DockerAuthConfig{ Username: user, Password: password, @@ -786,7 +782,7 @@ func normalizeAuthFileKey(key string, legacyFormat bool) string { stripped = strings.TrimPrefix(stripped, "https://") if legacyFormat || stripped != key { - stripped = strings.SplitN(stripped, "/", 2)[0] + stripped, _, _ = strings.Cut(stripped, "/") } return normalizeRegistry(stripped) diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go index 7ebd3fd220..3a11542c62 100644 --- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go @@ -14,6 +14,7 @@ import ( "github.com/containers/storage/pkg/homedir" "github.com/containers/storage/pkg/lockfile" "github.com/sirupsen/logrus" + "golang.org/x/exp/maps" ) // defaultShortNameMode is the default mode of registries.conf files if the @@ -308,9 +309,7 @@ func newShortNameAliasCache(path string, conf *shortNameAliasConf) (*shortNameAl // updateWithConfigurationFrom updates c with configuration from updates. // In case of conflict, updates is preferred. 
func (c *shortNameAliasCache) updateWithConfigurationFrom(updates *shortNameAliasCache) { - for name, value := range updates.namedAliases { - c.namedAliases[name] = value - } + maps.Copy(c.namedAliases, updates.namedAliases) } func loadShortNameAliasConf(confPath string) (*shortNameAliasConf, *shortNameAliasCache, error) { diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go index 463e770280..f45fd9de11 100644 --- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go @@ -16,6 +16,7 @@ import ( "github.com/containers/storage/pkg/homedir" "github.com/containers/storage/pkg/regexp" "github.com/sirupsen/logrus" + "golang.org/x/exp/maps" ) // systemRegistriesConfPath is the path to the system-wide registry @@ -1019,12 +1020,9 @@ func (c *parsedConfig) updateWithConfigurationFrom(updates *parsedConfig) { // Go maps have a non-deterministic order when iterating the keys, so // we dump them in a slice and sort it to enforce some order in // Registries slice. Some consumers of c/image (e.g., CRI-O) log the - // the configuration where a non-deterministic order could easily cause + // configuration where a non-deterministic order could easily cause // confusion. - prefixes := []string{} - for prefix := range registryMap { - prefixes = append(prefixes, prefix) - } + prefixes := maps.Keys(registryMap) sort.Strings(prefixes) c.partialV2.Registries = []Registry{} diff --git a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go index 285203bad4..96af5ace6a 100644 --- a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go +++ b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go @@ -12,6 +12,7 @@ import ( "time" "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" ) // SetupCertificates opens all .crt, .cert, and .key files in dir and appends / loads certs and key pairs as appropriate to tlsc @@ -80,12 +81,9 @@ func SetupCertificates(dir string, tlsc *tls.Config) error { } func hasFile(files []os.DirEntry, name string) bool { - for _, f := range files { - if f.Name() == name { - return true - } - } - return false + return slices.ContainsFunc(files, func(f os.DirEntry) bool { + return f.Name() == name + }) } // NewTransport Creates a default transport diff --git a/vendor/github.com/containers/image/v5/sif/transport.go b/vendor/github.com/containers/image/v5/sif/transport.go index 2037f25082..4c09010714 100644 --- a/vendor/github.com/containers/image/v5/sif/transport.go +++ b/vendor/github.com/containers/image/v5/sif/transport.go @@ -87,7 +87,7 @@ func (ref sifReference) Transport() types.ImageTransport { // StringWithinTransport returns a string representation of the reference, which MUST be such that // reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. // NOTE: The returned string is not promised to be equal to the original input to ParseReference; -// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. +// e.g. 
default attribute values omitted by the user may be filled in the return value, or vice versa. // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix; // instead, see transports.ImageName(). func (ref sifReference) StringWithinTransport() string { diff --git a/vendor/github.com/containers/image/v5/signature/fulcio_cert.go b/vendor/github.com/containers/image/v5/signature/fulcio_cert.go index 2e7f44ce5f..52a2dff4a7 100644 --- a/vendor/github.com/containers/image/v5/signature/fulcio_cert.go +++ b/vendor/github.com/containers/image/v5/signature/fulcio_cert.go @@ -12,6 +12,7 @@ import ( "github.com/containers/image/v5/signature/internal" "github.com/sigstore/fulcio/pkg/certificate" "github.com/sigstore/sigstore/pkg/cryptoutils" + "golang.org/x/exp/slices" ) // fulcioTrustRoot contains policy allow validating Fulcio-issued certificates. @@ -109,7 +110,7 @@ func (f *fulcioTrustRoot) verifyFulcioCertificateAtTime(relevantTime time.Time, // // So, pragmatically, the ideal design seem to be to only do signatures from a trusted build system (which is, by definition, // the arbiter of desired vs. malicious signatures) that maintains an audit log of performed signature operations; and that seems to - // make make the SCT (and all of Rekor apart from the trusted timestamp) unnecessary. + // make the SCT (and all of Rekor apart from the trusted timestamp) unnecessary. // == Validate the recorded OIDC issuer gotOIDCIssuer := false @@ -136,15 +137,7 @@ func (f *fulcioTrustRoot) verifyFulcioCertificateAtTime(relevantTime time.Time, } // == Validate the OIDC subject - foundEmail := false - // TO DO: Use slices.Contains after we update to Go 1.18 - for _, certEmail := range untrustedCertificate.EmailAddresses { - if certEmail == f.subjectEmail { - foundEmail = true - break - } - } - if !foundEmail { + if !slices.Contains(untrustedCertificate.EmailAddresses, f.subjectEmail) { return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Required email %s not found (got %#v)", f.subjectEmail, untrustedCertificate.EmailAddresses)) diff --git a/vendor/github.com/containers/image/v5/signature/internal/json.go b/vendor/github.com/containers/image/v5/signature/internal/json.go index 0f39fe0ad2..a9d127e656 100644 --- a/vendor/github.com/containers/image/v5/signature/internal/json.go +++ b/vendor/github.com/containers/image/v5/signature/internal/json.go @@ -5,6 +5,8 @@ import ( "encoding/json" "fmt" "io" + + "github.com/containers/image/v5/internal/set" ) // JSONFormatError is returned when JSON does not match expected format. @@ -20,8 +22,8 @@ func (err JSONFormatError) Error() string { // // The fieldResolver approach is useful for decoding the Policy.Transports map; using it for structs is a bit lazy, // we could use reflection to automate this. Later? -func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interface{}) error { - seenKeys := map[string]struct{}{} +func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) any) error { + seenKeys := set.New[string]() dec := json.NewDecoder(bytes.NewReader(data)) t, err := dec.Token() @@ -45,10 +47,10 @@ func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interfa // Coverage: This should never happen, dec.Token() rejects non-string-literals in this state. 
return JSONFormatError(fmt.Sprintf("Key string literal expected, got \"%s\"", t)) } - if _, ok := seenKeys[key]; ok { + if seenKeys.Contains(key) { return JSONFormatError(fmt.Sprintf("Duplicate key \"%s\"", key)) } - seenKeys[key] = struct{}{} + seenKeys.Add(key) valuePtr := fieldResolver(key) if valuePtr == nil { @@ -68,11 +70,11 @@ func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interfa // ParanoidUnmarshalJSONObjectExactFields unmarshals data as a JSON object, but failing on the slightest unexpected aspect // (including duplicated keys, unrecognized keys, and non-matching types). Each of the fields in exactFields // must be present exactly once, and none other fields are accepted. -func ParanoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]interface{}) error { - seenKeys := map[string]struct{}{} - if err := ParanoidUnmarshalJSONObject(data, func(key string) interface{} { +func ParanoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]any) error { + seenKeys := set.New[string]() + if err := ParanoidUnmarshalJSONObject(data, func(key string) any { if valuePtr, ok := exactFields[key]; ok { - seenKeys[key] = struct{}{} + seenKeys.Add(key) return valuePtr } return nil @@ -80,7 +82,7 @@ func ParanoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string] return err } for key := range exactFields { - if _, ok := seenKeys[key]; !ok { + if !seenKeys.Contains(key) { return JSONFormatError(fmt.Sprintf(`Key "%s" missing in a JSON object`, key)) } } diff --git a/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go b/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go index 27c2c7e637..d439b5f7a7 100644 --- a/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go +++ b/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go @@ -28,7 +28,7 @@ type UntrustedRekorSET struct { } type UntrustedRekorPayload struct { - Body []byte // In cosign, this is an interface{}, but only a string works + Body []byte // In cosign, this is an any, but only a string works IntegratedTime int64 LogIndex int64 LogID string @@ -51,7 +51,7 @@ func (s *UntrustedRekorSET) UnmarshalJSON(data []byte) error { // strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type. // Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError in a single place, the caller. func (s *UntrustedRekorSET) strictUnmarshalJSON(data []byte) error { - return ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + return ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ "SignedEntryTimestamp": &s.UntrustedSignedEntryTimestamp, "Payload": &s.UntrustedPayload, }) @@ -63,7 +63,7 @@ var _ json.Marshaler = (*UntrustedRekorSET)(nil) // MarshalJSON implements the json.Marshaler interface. func (s UntrustedRekorSET) MarshalJSON() ([]byte, error) { - return json.Marshal(map[string]interface{}{ + return json.Marshal(map[string]any{ "SignedEntryTimestamp": s.UntrustedSignedEntryTimestamp, "Payload": s.UntrustedPayload, }) @@ -86,7 +86,7 @@ func (p *UntrustedRekorPayload) UnmarshalJSON(data []byte) error { // strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type. // Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError in a single place, the caller. 
func (p *UntrustedRekorPayload) strictUnmarshalJSON(data []byte) error { - return ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + return ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ "body": &p.Body, "integratedTime": &p.IntegratedTime, "logIndex": &p.LogIndex, @@ -100,7 +100,7 @@ var _ json.Marshaler = (*UntrustedRekorPayload)(nil) // MarshalJSON implements the json.Marshaler interface. func (p UntrustedRekorPayload) MarshalJSON() ([]byte, error) { - return json.Marshal(map[string]interface{}{ + return json.Marshal(map[string]any{ "body": p.Body, "integratedTime": p.IntegratedTime, "logIndex": p.LogIndex, @@ -159,7 +159,7 @@ func VerifyRekorSET(publicKey *ecdsa.PublicKey, unverifiedRekorSET []byte, unver } hashedRekordV001Bytes, err := json.Marshal(hashedRekord.Spec) if err != nil { - // Coverage: hashedRekord.Spec is an interface{} that was just unmarshaled, + // Coverage: hashedRekord.Spec is an any that was just unmarshaled, // so this should never fail. return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("re-creating hashedrekord spec: %v", err)) } diff --git a/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go b/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go index afacd82034..1ac14ac3c7 100644 --- a/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go +++ b/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go @@ -55,19 +55,19 @@ func (s UntrustedSigstorePayload) MarshalJSON() ([]byte, error) { if s.UntrustedDockerManifestDigest == "" || s.UntrustedDockerReference == "" { return nil, errors.New("Unexpected empty signature content") } - critical := map[string]interface{}{ + critical := map[string]any{ "type": sigstoreSignatureType, "image": map[string]string{"docker-manifest-digest": s.UntrustedDockerManifestDigest.String()}, "identity": map[string]string{"docker-reference": s.UntrustedDockerReference}, } - optional := map[string]interface{}{} + optional := map[string]any{} if s.UntrustedCreatorID != nil { optional["creator"] = *s.UntrustedCreatorID } if s.UntrustedTimestamp != nil { optional["timestamp"] = *s.UntrustedTimestamp } - signature := map[string]interface{}{ + signature := map[string]any{ "critical": critical, "optional": optional, } @@ -92,7 +92,7 @@ func (s *UntrustedSigstorePayload) UnmarshalJSON(data []byte) error { // Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError in a single place, the caller. func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error { var critical, optional json.RawMessage - if err := ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + if err := ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ "critical": &critical, "optional": &optional, }); err != nil { @@ -104,7 +104,7 @@ func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error { var gotCreatorID, gotTimestamp = false, false // /usr/bin/cosign generates "optional": null if there are no user-specified annotations. 
if !bytes.Equal(optional, []byte("null")) { - if err := ParanoidUnmarshalJSONObject(optional, func(key string) interface{} { + if err := ParanoidUnmarshalJSONObject(optional, func(key string) any { switch key { case "creator": gotCreatorID = true @@ -113,7 +113,7 @@ func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error { gotTimestamp = true return &timestamp default: - var ignore interface{} + var ignore any } }); err != nil { @@ -133,7 +133,7 @@ func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error { var t string var image, identity json.RawMessage - if err := ParanoidUnmarshalJSONObjectExactFields(critical, map[string]interface{}{ + if err := ParanoidUnmarshalJSONObjectExactFields(critical, map[string]any{ "type": &t, "image": &image, "identity": &identity, @@ -145,14 +145,14 @@ func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error { } var digestString string - if err := ParanoidUnmarshalJSONObjectExactFields(image, map[string]interface{}{ + if err := ParanoidUnmarshalJSONObjectExactFields(image, map[string]any{ "docker-manifest-digest": &digestString, }); err != nil { return err } s.UntrustedDockerManifestDigest = digest.Digest(digestString) - return ParanoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{ + return ParanoidUnmarshalJSONObjectExactFields(identity, map[string]any{ "docker-reference": &s.UntrustedDockerReference, }) } diff --git a/vendor/github.com/containers/image/v5/signature/policy_config.go b/vendor/github.com/containers/image/v5/signature/policy_config.go index 5ca4ad7130..7eb5cab7d8 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_config.go +++ b/vendor/github.com/containers/image/v5/signature/policy_config.go @@ -104,7 +104,7 @@ var _ json.Unmarshaler = (*Policy)(nil) func (p *Policy) UnmarshalJSON(data []byte) error { *p = Policy{} transports := policyTransportsMap{} - if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} { + if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any { switch key { case "default": return &p.Default @@ -135,7 +135,7 @@ func (m *policyTransportsMap) UnmarshalJSON(data []byte) error { // We can't unmarshal directly into map values because it is not possible to take an address of a map value. // So, use a temporary map of pointers-to-slices and convert. tmpMap := map[string]*PolicyTransportScopes{} - if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} { + if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any { // transport can be nil transport := transports.Get(key) // internal.ParanoidUnmarshalJSONObject detects key duplication for us, check just to be safe. @@ -181,7 +181,7 @@ func (m *policyTransportScopesWithTransport) UnmarshalJSON(data []byte) error { // We can't unmarshal directly into map values because it is not possible to take an address of a map value. // So, use a temporary map of pointers-to-slices and convert. tmpMap := map[string]*PolicyRequirements{} - if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} { + if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any { // internal.ParanoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
if _, ok := tmpMap[key]; ok { return nil @@ -271,7 +271,7 @@ var _ json.Unmarshaler = (*prInsecureAcceptAnything)(nil) func (pr *prInsecureAcceptAnything) UnmarshalJSON(data []byte) error { *pr = prInsecureAcceptAnything{} var tmp prInsecureAcceptAnything - if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ "type": &tmp.Type, }); err != nil { return err @@ -301,7 +301,7 @@ var _ json.Unmarshaler = (*prReject)(nil) func (pr *prReject) UnmarshalJSON(data []byte) error { *pr = prReject{} var tmp prReject - if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ "type": &tmp.Type, }); err != nil { return err @@ -384,7 +384,7 @@ func (pr *prSignedBy) UnmarshalJSON(data []byte) error { var tmp prSignedBy var gotKeyPath, gotKeyPaths, gotKeyData = false, false, false var signedIdentity json.RawMessage - if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} { + if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any { switch key { case "type": return &tmp.Type @@ -495,7 +495,7 @@ func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error { *pr = prSignedBaseLayer{} var tmp prSignedBaseLayer var baseLayerIdentity json.RawMessage - if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ "type": &tmp.Type, "baseLayerIdentity": &baseLayerIdentity, }); err != nil { @@ -564,7 +564,7 @@ var _ json.Unmarshaler = (*prmMatchExact)(nil) func (prm *prmMatchExact) UnmarshalJSON(data []byte) error { *prm = prmMatchExact{} var tmp prmMatchExact - if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ "type": &tmp.Type, }); err != nil { return err @@ -594,7 +594,7 @@ var _ json.Unmarshaler = (*prmMatchRepoDigestOrExact)(nil) func (prm *prmMatchRepoDigestOrExact) UnmarshalJSON(data []byte) error { *prm = prmMatchRepoDigestOrExact{} var tmp prmMatchRepoDigestOrExact - if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ "type": &tmp.Type, }); err != nil { return err @@ -624,7 +624,7 @@ var _ json.Unmarshaler = (*prmMatchRepository)(nil) func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error { *prm = prmMatchRepository{} var tmp prmMatchRepository - if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ "type": &tmp.Type, }); err != nil { return err @@ -664,7 +664,7 @@ var _ json.Unmarshaler = (*prmExactReference)(nil) func (prm *prmExactReference) UnmarshalJSON(data []byte) error { *prm = prmExactReference{} var tmp prmExactReference - if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ "type": &tmp.Type, "dockerReference": &tmp.DockerReference, }); err != nil { @@ -706,7 +706,7 @@ var _ json.Unmarshaler = (*prmExactRepository)(nil) func (prm *prmExactRepository) UnmarshalJSON(data []byte) error { *prm = prmExactRepository{} var tmp prmExactRepository - if err := 
internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ "type": &tmp.Type, "dockerRepository": &tmp.DockerRepository, }); err != nil { @@ -778,7 +778,7 @@ var _ json.Unmarshaler = (*prmRemapIdentity)(nil) func (prm *prmRemapIdentity) UnmarshalJSON(data []byte) error { *prm = prmRemapIdentity{} var tmp prmRemapIdentity - if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ "type": &tmp.Type, "prefix": &tmp.Prefix, "signedPrefix": &tmp.SignedPrefix, diff --git a/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go b/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go index eeec6dc3eb..d8c6a97f1b 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go +++ b/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go @@ -147,7 +147,7 @@ func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error { var gotKeyPath, gotKeyData, gotFulcio, gotRekorPublicKeyPath, gotRekorPublicKeyData bool var fulcio prSigstoreSignedFulcio var signedIdentity json.RawMessage - if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} { + if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any { switch key { case "type": return &tmp.Type @@ -298,7 +298,7 @@ func (f *prSigstoreSignedFulcio) UnmarshalJSON(data []byte) error { *f = prSigstoreSignedFulcio{} var tmp prSigstoreSignedFulcio var gotCAPath, gotCAData, gotOIDCIssuer, gotSubjectEmail bool // = false... - if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} { + if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any { switch key { case "caPath": gotCAPath = true diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval.go b/vendor/github.com/containers/image/v5/signature/policy_eval.go index 533a997b1c..4f8d0da389 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_eval.go +++ b/vendor/github.com/containers/image/v5/signature/policy_eval.go @@ -46,7 +46,7 @@ type PolicyRequirement interface { // - sarRejected if the signature has not been verified; // in that case error must be non-nil, and should be an PolicyRequirementError if evaluation // succeeded but the result was rejection. - // - sarUnknown if if this PolicyRequirement does not deal with signatures. + // - sarUnknown if this PolicyRequirement does not deal with signatures. // NOTE: sarUnknown should not be returned if this PolicyRequirement should make a decision but something failed. // Returning sarUnknown and a non-nil error value is invalid. 
// WARNING: This makes the signature contents acceptable for further processing, diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go index ef98b8b83f..a4187735b7 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go +++ b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go @@ -12,6 +12,7 @@ import ( "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/manifest" digest "github.com/opencontainers/go-digest" + "golang.org/x/exp/slices" ) func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { @@ -67,10 +68,8 @@ func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image priva signature, err := verifyAndExtractSignature(mech, sig, signatureAcceptanceRules{ validateKeyIdentity: func(keyIdentity string) error { - for _, trustedIdentity := range trustedIdentities { - if keyIdentity == trustedIdentity { - return nil - } + if slices.Contains(trustedIdentities, keyIdentity) { + return nil } // Coverage: We use a private GPG home directory and only import trusted keys, so this should // not be reachable. diff --git a/vendor/github.com/containers/image/v5/signature/sigstore/copied.go b/vendor/github.com/containers/image/v5/signature/sigstore/copied.go index 0233c4cb86..04e05fcb42 100644 --- a/vendor/github.com/containers/image/v5/signature/sigstore/copied.go +++ b/vendor/github.com/containers/image/v5/signature/sigstore/copied.go @@ -33,12 +33,12 @@ import ( // limitations under the License. const ( - // from sigstore/cosign/pkg/cosign.sigstorePrivateKeyPemType + // from sigstore/cosign/pkg/cosign.sigstorePrivateKeyPemType. sigstorePrivateKeyPemType = "ENCRYPTED COSIGN PRIVATE KEY" ) // from sigstore/cosign/pkg/cosign.loadPrivateKey -// FIXME: Do we need all of these key formats, and all of those +// FIXME: Do we need all of these key formats? 
func loadPrivateKey(key []byte, pass []byte) (signature.SignerVerifier, error) { // Decrypt first p, _ := pem.Decode(key) diff --git a/vendor/github.com/containers/image/v5/signature/sigstore/rekor/leveled_logger.go b/vendor/github.com/containers/image/v5/signature/sigstore/rekor/leveled_logger.go index 6df7c52201..f240d8cab8 100644 --- a/vendor/github.com/containers/image/v5/signature/sigstore/rekor/leveled_logger.go +++ b/vendor/github.com/containers/image/v5/signature/sigstore/rekor/leveled_logger.go @@ -17,7 +17,7 @@ func leveledLoggerForLogrus(logger *logrus.Logger) retryablehttp.LeveledLogger { } // log is the actual conversion implementation -func (l *leveledLogger) log(level logrus.Level, msg string, keysAndValues []interface{}) { +func (l *leveledLogger) log(level logrus.Level, msg string, keysAndValues []any) { fields := logrus.Fields{} for i := 0; i < len(keysAndValues)-1; i += 2 { key := keysAndValues[i] @@ -32,21 +32,21 @@ func (l *leveledLogger) log(level logrus.Level, msg string, keysAndValues []inte } // Debug implements retryablehttp.LeveledLogger -func (l *leveledLogger) Debug(msg string, keysAndValues ...interface{}) { +func (l *leveledLogger) Debug(msg string, keysAndValues ...any) { l.log(logrus.DebugLevel, msg, keysAndValues) } // Error implements retryablehttp.LeveledLogger -func (l *leveledLogger) Error(msg string, keysAndValues ...interface{}) { +func (l *leveledLogger) Error(msg string, keysAndValues ...any) { l.log(logrus.ErrorLevel, msg, keysAndValues) } // Info implements retryablehttp.LeveledLogger -func (l *leveledLogger) Info(msg string, keysAndValues ...interface{}) { +func (l *leveledLogger) Info(msg string, keysAndValues ...any) { l.log(logrus.InfoLevel, msg, keysAndValues) } // Warn implements retryablehttp.LeveledLogger -func (l *leveledLogger) Warn(msg string, keysAndValues ...interface{}) { +func (l *leveledLogger) Warn(msg string, keysAndValues ...any) { l.log(logrus.WarnLevel, msg, keysAndValues) } diff --git a/vendor/github.com/containers/image/v5/signature/simple.go b/vendor/github.com/containers/image/v5/signature/simple.go index e91f46abac..17d63f89e7 100644 --- a/vendor/github.com/containers/image/v5/signature/simple.go +++ b/vendor/github.com/containers/image/v5/signature/simple.go @@ -81,19 +81,19 @@ func (s untrustedSignature) MarshalJSON() ([]byte, error) { if s.UntrustedDockerManifestDigest == "" || s.UntrustedDockerReference == "" { return nil, errors.New("Unexpected empty signature content") } - critical := map[string]interface{}{ + critical := map[string]any{ "type": signatureType, "image": map[string]string{"docker-manifest-digest": s.UntrustedDockerManifestDigest.String()}, "identity": map[string]string{"docker-reference": s.UntrustedDockerReference}, } - optional := map[string]interface{}{} + optional := map[string]any{} if s.UntrustedCreatorID != nil { optional["creator"] = *s.UntrustedCreatorID } if s.UntrustedTimestamp != nil { optional["timestamp"] = *s.UntrustedTimestamp } - signature := map[string]interface{}{ + signature := map[string]any{ "critical": critical, "optional": optional, } @@ -118,7 +118,7 @@ func (s *untrustedSignature) UnmarshalJSON(data []byte) error { // Splitting it into a separate function allows us to do the internal.JSONFormatError → InvalidSignatureError in a single place, the caller. 
func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error { var critical, optional json.RawMessage - if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ "critical": &critical, "optional": &optional, }); err != nil { @@ -128,7 +128,7 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error { var creatorID string var timestamp float64 var gotCreatorID, gotTimestamp = false, false - if err := internal.ParanoidUnmarshalJSONObject(optional, func(key string) interface{} { + if err := internal.ParanoidUnmarshalJSONObject(optional, func(key string) any { switch key { case "creator": gotCreatorID = true @@ -137,7 +137,7 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error { gotTimestamp = true return &timestamp default: - var ignore interface{} + var ignore any } }); err != nil { @@ -156,7 +156,7 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error { var t string var image, identity json.RawMessage - if err := internal.ParanoidUnmarshalJSONObjectExactFields(critical, map[string]interface{}{ + if err := internal.ParanoidUnmarshalJSONObjectExactFields(critical, map[string]any{ "type": &t, "image": &image, "identity": &identity, @@ -168,14 +168,14 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error { } var digestString string - if err := internal.ParanoidUnmarshalJSONObjectExactFields(image, map[string]interface{}{ + if err := internal.ParanoidUnmarshalJSONObjectExactFields(image, map[string]any{ "docker-manifest-digest": &digestString, }); err != nil { return err } s.UntrustedDockerManifestDigest = digest.Digest(digestString) - return internal.ParanoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{ + return internal.ParanoidUnmarshalJSONObjectExactFields(identity, map[string]any{ "docker-reference": &s.UntrustedDockerReference, }) } diff --git a/vendor/github.com/containers/image/v5/storage/storage_dest.go b/vendor/github.com/containers/image/v5/storage/storage_dest.go index ae3bfa8fa4..cece6e496d 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_dest.go +++ b/vendor/github.com/containers/image/v5/storage/storage_dest.go @@ -21,6 +21,7 @@ import ( "github.com/containers/image/v5/internal/imagedestination/stubs" "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/internal/putblobdigest" + "github.com/containers/image/v5/internal/set" "github.com/containers/image/v5/internal/signature" "github.com/containers/image/v5/internal/tmpdir" "github.com/containers/image/v5/manifest" @@ -34,6 +35,7 @@ import ( digest "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" ) var ( @@ -242,7 +244,7 @@ type zstdFetcher struct { // GetBlobAt converts from chunked.GetBlobAt to BlobChunkAccessor.GetBlobAt. func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { - var newChunks []private.ImageSourceChunk + newChunks := make([]private.ImageSourceChunk, 0, len(chunks)) for _, v := range chunks { i := private.ImageSourceChunk{ Offset: v.Offset, @@ -795,14 +797,14 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t // Add the non-layer blobs as data items.
Since we only share layers, they should all be in files, so // we just need to screen out the ones that are actually layers to get the list of non-layers. - dataBlobs := make(map[digest.Digest]struct{}) + dataBlobs := set.New[digest.Digest]() for blob := range s.filenames { - dataBlobs[blob] = struct{}{} + dataBlobs.Add(blob) } for _, layerBlob := range layerBlobs { - delete(dataBlobs, layerBlob.Digest) + dataBlobs.Delete(layerBlob.Digest) } - for blob := range dataBlobs { + for _, blob := range dataBlobs.Values() { v, err := os.ReadFile(s.filenames[blob]) if err != nil { return fmt.Errorf("copying non-layer blob %q to image: %w", blob, err) @@ -883,9 +885,7 @@ func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob if err != nil { return err } - newBlob := make([]byte, len(manifestBlob)) - copy(newBlob, manifestBlob) - s.manifest = newBlob + s.manifest = slices.Clone(manifestBlob) s.manifestDigest = digest return nil } diff --git a/vendor/github.com/containers/image/v5/storage/storage_reference.go b/vendor/github.com/containers/image/v5/storage/storage_reference.go index dbb9804a6b..2ae0692c33 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_reference.go +++ b/vendor/github.com/containers/image/v5/storage/storage_reference.go @@ -14,6 +14,7 @@ import ( "github.com/containers/storage" digest "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" ) // A storageReference holds an arbitrary name and/or an ID, which is a 32-byte @@ -52,17 +53,15 @@ func newReference(transport storageTransport, named reference.Named, id string) // imageMatchesRepo returns true iff image.Names contains an element with the same repo as ref func imageMatchesRepo(image *storage.Image, ref reference.Named) bool { repo := ref.Name() - for _, name := range image.Names { - if named, err := reference.ParseNormalizedNamed(name); err == nil { - if named.Name() == repo { - return true - } + return slices.ContainsFunc(image.Names, func(name string) bool { + if named, err := reference.ParseNormalizedNamed(name); err == nil && named.Name() == repo { + return true } - } - return false + return false + }) } -// multiArchImageMatchesSystemContext returns true if if the passed-in image both contains a +// multiArchImageMatchesSystemContext returns true if the passed-in image both contains a // multi-arch manifest that matches the passed-in digest, and the image is the per-platform // image instance that matches sys. // @@ -170,11 +169,9 @@ func (s *storageReference) resolveImage(sys *types.SystemContext) (*storage.Imag // sake of older consumers that don't know there's a whole list in there now. 
if s.named != nil { if digested, ok := s.named.(reference.Digested); ok { - for _, digest := range loadedImage.Digests { - if digest == digested.Digest() { - loadedImage.Digest = digest - break - } + digest := digested.Digest() + if slices.Contains(loadedImage.Digests, digest) { + loadedImage.Digest = digest } } } @@ -207,10 +204,10 @@ func (s storageReference) StringWithinTransport() string { } res := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "+" + s.transport.store.RunRoot() + optionsList + "]" if s.named != nil { - res = res + s.named.String() + res += s.named.String() } if s.id != "" { - res = res + "@" + s.id + res += "@" + s.id } return res } @@ -218,10 +215,10 @@ func (s storageReference) StringWithinTransport() string { func (s storageReference) PolicyConfigurationIdentity() string { res := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]" if s.named != nil { - res = res + s.named.String() + res += s.named.String() } if s.id != "" { - res = res + "@" + s.id + res += "@" + s.id } return res } diff --git a/vendor/github.com/containers/image/v5/storage/storage_src.go b/vendor/github.com/containers/image/v5/storage/storage_src.go index d1affc5e9c..08f56f4bbc 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_src.go +++ b/vendor/github.com/containers/image/v5/storage/storage_src.go @@ -186,7 +186,7 @@ func (s *storageImageSource) getBlobAndLayerID(digest digest.Digest, layers []st } // GetManifest() reads the image's manifest. -func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) (manifestBlob []byte, MIMEType string, err error) { +func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) (manifestBlob []byte, mimeType string, err error) { if instanceDigest != nil { key := manifestBigDataKey(*instanceDigest) blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key) diff --git a/vendor/github.com/containers/image/v5/storage/storage_transport.go b/vendor/github.com/containers/image/v5/storage/storage_transport.go index 104e9d8cca..58ba3ee651 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_transport.go +++ b/vendor/github.com/containers/image/v5/storage/storage_transport.go @@ -234,35 +234,30 @@ func (s *storageTransport) ParseReference(reference string) (types.ImageReferenc reference = reference[closeIndex+1:] // Peel off a "driver@" from the start. driverInfo := "" - driverSplit := strings.SplitN(storeSpec, "@", 2) - if len(driverSplit) != 2 { + driverPart1, driverPart2, gotDriver := strings.Cut(storeSpec, "@") + if !gotDriver { + storeSpec = driverPart1 if storeSpec == "" { return nil, ErrInvalidReference } } else { - driverInfo = driverSplit[0] + driverInfo = driverPart1 if driverInfo == "" { return nil, ErrInvalidReference } - storeSpec = driverSplit[1] + storeSpec = driverPart2 if storeSpec == "" { return nil, ErrInvalidReference } } // Peel off a ":options" from the end. var options []string - optionsSplit := strings.SplitN(storeSpec, ":", 2) - if len(optionsSplit) == 2 { - options = strings.Split(optionsSplit[1], ",") - storeSpec = optionsSplit[0] + storeSpec, optionsPart, gotOptions := strings.Cut(storeSpec, ":") + if gotOptions { + options = strings.Split(optionsPart, ",") } // Peel off a "+runroot" from the new end. 
- runRootInfo := "" - runRootSplit := strings.SplitN(storeSpec, "+", 2) - if len(runRootSplit) == 2 { - runRootInfo = runRootSplit[1] - storeSpec = runRootSplit[0] - } + storeSpec, runRootInfo, _ := strings.Cut(storeSpec, "+") // runRootInfo is "" if there is no "+" // The rest is our graph root. rootInfo := storeSpec // Check that any paths are absolute paths. diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_reference.go b/vendor/github.com/containers/image/v5/tarball/tarball_reference.go index 690067ec3d..d5578d9e8a 100644 --- a/vendor/github.com/containers/image/v5/tarball/tarball_reference.go +++ b/vendor/github.com/containers/image/v5/tarball/tarball_reference.go @@ -9,8 +9,8 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/internal/image" "github.com/containers/image/v5/types" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/exp/maps" ) // ConfigUpdater is an interface that ImageReferences for "tarball" images also @@ -35,9 +35,7 @@ func (r *tarballReference) ConfigUpdate(config imgspecv1.Image, annotations map[ if r.annotations == nil { r.annotations = make(map[string]string) } - for k, v := range annotations { - r.annotations[k] = v - } + maps.Copy(r.annotations, annotations) return nil } @@ -73,7 +71,7 @@ func (r *tarballReference) NewImage(ctx context.Context, sys *types.SystemContex func (r *tarballReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { for _, filename := range r.filenames { if err := os.Remove(filename); err != nil && !os.IsNotExist(err) { - return fmt.Errorf("error removing %q: %v", filename, err) + return fmt.Errorf("error removing %q: %w", filename, err) } } return nil diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_src.go b/vendor/github.com/containers/image/v5/tarball/tarball_src.go index 1dc4c3ad94..0fb3a83934 100644 --- a/vendor/github.com/containers/image/v5/tarball/tarball_src.go +++ b/vendor/github.com/containers/image/v5/tarball/tarball_src.go @@ -18,6 +18,8 @@ import ( digest "github.com/opencontainers/go-digest" imgspecs "github.com/opencontainers/image-spec/specs-go" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/exp/maps" + "golang.org/x/exp/slices" ) type tarballImageSource struct { @@ -62,13 +64,13 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System } else { file, err = os.Open(filename) if err != nil { - return nil, fmt.Errorf("error opening %q for reading: %v", filename, err) + return nil, fmt.Errorf("error opening %q for reading: %w", filename, err) } defer file.Close() reader = file fileinfo, err := file.Stat() if err != nil { - return nil, fmt.Errorf("error reading size of %q: %v", filename, err) + return nil, fmt.Errorf("error reading size of %q: %w", filename, err) } blobSize = fileinfo.Size() blobTime = fileinfo.ModTime() @@ -168,10 +170,6 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System MediaType: blobTypes[i], }) } - annotations := make(map[string]string) - for k, v := range r.annotations { - annotations[k] = v - } manifest := imgspecv1.Manifest{ Versioned: imgspecs.Versioned{ SchemaVersion: 2, @@ -182,7 +180,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System MediaType: imgspecv1.MediaTypeImageConfig, }, Layers: layerDescriptors, - Annotations: 
annotations, + Annotations: maps.Clone(r.annotations), } // Encode the manifest. @@ -228,20 +226,19 @@ func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobIn return io.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil } // Maybe one of the layer blobs. - for i := range is.blobIDs { - if blobinfo.Digest == is.blobIDs[i] { - // We want to read that layer: open the file or memory block and hand it back. - if is.filenames[i] == "-" { - return io.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil - } - reader, err := os.Open(is.filenames[i]) - if err != nil { - return nil, -1, fmt.Errorf("error opening %q: %v", is.filenames[i], err) - } - return reader, is.blobSizes[i], nil - } + i := slices.Index(is.blobIDs, blobinfo.Digest) + if i == -1 { + return nil, -1, fmt.Errorf("no blob with digest %q found", blobinfo.Digest.String()) + } + // We want to read that layer: open the file or memory block and hand it back. + if is.filenames[i] == "-" { + return io.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil + } + reader, err := os.Open(is.filenames[i]) + if err != nil { + return nil, -1, fmt.Errorf("error opening %q: %v", is.filenames[i], err) } - return nil, -1, fmt.Errorf("no blob with digest %q found", blobinfo.Digest.String()) + return reader, is.blobSizes[i], nil } // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go index 62d767b583..1d9c2dc35d 100644 --- a/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go +++ b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go @@ -25,24 +25,24 @@ import ( // ParseImageName converts a URL-like image name to a types.ImageReference. func ParseImageName(imgName string) (types.ImageReference, error) { // Keep this in sync with TransportFromImageName! - parts := strings.SplitN(imgName, ":", 2) - if len(parts) != 2 { + transportName, withinTransport, valid := strings.Cut(imgName, ":") + if !valid { return nil, fmt.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, imgName) } - transport := transports.Get(parts[0]) + transport := transports.Get(transportName) if transport == nil { - return nil, fmt.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, parts[0]) + return nil, fmt.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, transportName) } - return transport.ParseReference(parts[1]) + return transport.ParseReference(withinTransport) } // TransportFromImageName converts an URL-like name to a types.ImageTransport or nil when // the transport is unknown or when the input is invalid. func TransportFromImageName(imageName string) types.ImageTransport { // Keep this in sync with ParseImageName! 
- parts := strings.SplitN(imageName, ":", 2) - if len(parts) == 2 { - return transports.Get(parts[0]) + transportName, _, valid := strings.Cut(imageName, ":") + if valid { + return transports.Get(transportName) } return nil } diff --git a/vendor/github.com/containers/image/v5/transports/transports.go b/vendor/github.com/containers/image/v5/transports/transports.go index 46ee3710fc..834f33b489 100644 --- a/vendor/github.com/containers/image/v5/transports/transports.go +++ b/vendor/github.com/containers/image/v5/transports/transports.go @@ -5,6 +5,7 @@ import ( "sort" "sync" + "github.com/containers/image/v5/internal/set" "github.com/containers/image/v5/types" ) @@ -66,22 +67,21 @@ func Register(t types.ImageTransport) { // This is the generally recommended way to refer to images in the UI. // // NOTE: The returned string is not promised to be equal to the original input to ParseImageName; -// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. +// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa. func ImageName(ref types.ImageReference) string { return ref.Transport().Name() + ":" + ref.StringWithinTransport() } +var deprecatedTransports = set.NewWithValues("atomic") + // ListNames returns a list of non deprecated transport names. // Deprecated transports can be used, but are not presented to users. func ListNames() []string { kt.mu.Lock() defer kt.mu.Unlock() - deprecated := map[string]bool{ - "atomic": true, - } var names []string for _, transport := range kt.transports { - if !deprecated[transport.Name()] { + if !deprecatedTransports.Contains(transport.Name()) { names = append(names, transport.Name()) } } diff --git a/vendor/github.com/containers/image/v5/types/types.go b/vendor/github.com/containers/image/v5/types/types.go index 33a54566c2..6ea414b867 100644 --- a/vendor/github.com/containers/image/v5/types/types.go +++ b/vendor/github.com/containers/image/v5/types/types.go @@ -11,7 +11,7 @@ import ( v1 "github.com/opencontainers/image-spec/specs-go/v1" ) -// ImageTransport is a top-level namespace for ways to to store/load an image. +// ImageTransport is a top-level namespace for ways to store/load an image. // It should generally correspond to ImageSource/ImageDestination implementations. // // Note that ImageTransport is based on "ways the users refer to image storage", not necessarily on the underlying physical transport. @@ -48,7 +48,7 @@ type ImageReference interface { // StringWithinTransport returns a string representation of the reference, which MUST be such that // reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. // NOTE: The returned string is not promised to be equal to the original input to ParseReference; - // e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. + // e.g. default attribute values omitted by the user may be filled in the return value, or vice versa. // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix; // instead, see transports.ImageName(). StringWithinTransport() string @@ -138,7 +138,7 @@ type BlobInfo struct { // or if it should be compressed or decompressed. The field defaults to // preserve the original layer's compressedness. 
// TODO: To remove together with CryptoOperation in re-design to remove - // field out out of BlobInfo. + // field out of BlobInfo. CompressionOperation LayerCompression // CompressionAlgorithm is used in Image.UpdateLayerInfos to set the correct // MIME type for compressed layers (e.g., gzip or zstd). This field MUST be @@ -149,7 +149,7 @@ type BlobInfo struct { // CryptoOperation is used in Image.UpdateLayerInfos to instruct // whether the original layer was encrypted/decrypted // TODO: To remove together with CompressionOperation in re-design to - // remove field out out of BlobInfo. + // remove field out of BlobInfo. CryptoOperation LayerCrypto // Before adding any fields to this struct, read the NOTE above. } diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS index dffacff112..0728bfe18f 100644 --- a/vendor/github.com/docker/docker/AUTHORS +++ b/vendor/github.com/docker/docker/AUTHORS @@ -1,5 +1,6 @@ -# This file lists all individuals having contributed content to the repository. -# For how it is generated, see `hack/generate-authors.sh`. +# File @generated by hack/generate-authors.sh. DO NOT EDIT. +# This file lists all contributors to the repository. +# See hack/generate-authors.sh to make modifications. Aanand Prasad Aaron Davidson @@ -7,16 +8,17 @@ Aaron Feng Aaron Hnatiw Aaron Huslage Aaron L. Xu -Aaron Lehmann +Aaron Lehmann Aaron Welch -Aaron.L.Xu Abel Muiño Abhijeet Kasurde -Abhinandan Prativadi +Abhinandan Prativadi Abhinav Ajgaonkar Abhishek Chanda Abhishek Sharma Abin Shahab +Abirdcfly +Ada Mancini Adam Avilla Adam Dobrawy Adam Eijdenberg @@ -26,6 +28,7 @@ Adam Mills Adam Pointer Adam Singer Adam Walz +Adam Williams Addam Hardy Aditi Rajagopal Aditya @@ -51,6 +54,7 @@ Akihiro Suda Akim Demaille Akira Koyasu Akshay Karle +Akshay Moghe Al Tobey alambike Alan Hoyle @@ -58,9 +62,11 @@ Alan Scherger Alan Thompson Albert Callarisa Albert Zhang -Albin Kerouanton +Albin Kerouanton +Alec Benson Alejandro González Hevia Aleksa Sarai +Aleksandr Chebotov Aleksandrs Fadins Alena Prokharchyk Alessandro Boch @@ -72,6 +78,7 @@ Alex Crawford Alex Ellis Alex Gaynor Alex Goodman +Alex Nordlund Alex Olshansky Alex Samorukov Alex Warhawk @@ -79,7 +86,8 @@ Alexander Artemenko Alexander Boyd Alexander Larsson Alexander Midlash -Alexander Morozov +Alexander Morozov +Alexander Polakov Alexander Shopov Alexandre Beslic Alexandre Garnier @@ -90,7 +98,8 @@ Alexei Margasov Alexey Guskov Alexey Kotlyarov Alexey Shamrin -Alexis THOMAS +Alexis Ries +Alexis Thomas Alfred Landrum Ali Dehghani Alicia Lauerman @@ -103,6 +112,7 @@ Alvin Deng Alvin Richards amangoel Amen Belayneh +Ameya Gawde Amir Goldstein Amit Bakshi Amit Krishnan @@ -126,6 +136,7 @@ Andreas Köhler Andreas Savvides Andreas Tiefenthaler Andrei Gherzan +Andrei Ushakov Andrei Vagin Andrew C. 
Bodine Andrew Clay Shafer @@ -135,6 +146,7 @@ Andrew Gerrand Andrew Guenther Andrew He Andrew Hsu +Andrew Kim Andrew Kuklewicz Andrew Macgregor Andrew Macpherson @@ -150,15 +162,17 @@ Andrey Kolomentsev Andrey Petrov Andrey Stolbovsky André Martins -andy Andy Chambers andy diller Andy Goldstein Andy Kipp +Andy Lindeman Andy Rothfusz Andy Smith Andy Wilson +Andy Zhang Anes Hasicic +Angel Velazquez Anil Belur Anil Madhavapeddy Ankit Jain @@ -179,20 +193,24 @@ Antonio Murdaca Antonis Kalipetis Antony Messerli Anuj Bahuguna +Anuj Varma Anusha Ragunathan +Anyu Wang apocas Arash Deshmeh ArikaChen -Arko Dasgupta +Arko Dasgupta Arnaud Lefebvre -Arnaud Porterie +Arnaud Porterie Arnaud Rebillout +Artem Khramov Arthur Barr Arthur Gautier Artur Meyster Arun Gupta Asad Saeeduddin Asbjørn Enge +Austin Vazquez averagehuman Avi Das Avi Kivity @@ -200,17 +218,21 @@ Avi Miller Avi Vaid ayoshitake Azat Khuyiyakhmetov +Bao Yonglei Bardia Keyoumarsi Barnaby Gray Barry Allard Bartłomiej Piotrowski Bastiaan Bakker +Bastien Pascard bdevloed +Bearice Ren Ben Bonnefoy Ben Firshman Ben Golub Ben Gould Ben Hall +Ben Langfeld Ben Sargent Ben Severson Ben Toews @@ -218,6 +240,7 @@ Ben Wiklund Benjamin Atkin Benjamin Baker Benjamin Boudreau +Benjamin Böhmke Benjamin Yolken Benny Ng Benoit Chesneau @@ -231,12 +254,15 @@ Bhiraj Butala Bhumika Bayani Bilal Amarni Bill Wang +Billy Ridgway Bily Zhang Bin Liu Bingshen Wang +Bjorn Neergaard Blake Geno Boaz Shuster bobby abbott +Bojun Zhu Boqin Qin Boris Pruessmann Boshi Lian @@ -252,6 +278,7 @@ Brendan Dixon Brent Salisbury Brett Higgins Brett Kochendorfer +Brett Milford Brett Randall Brian (bex) Exelbierd Brian Bland @@ -282,6 +309,7 @@ Byung Kang Caleb Spare Calen Pennington Cameron Boehmer +Cameron Sparr Cameron Spear Campbell Allen Candid Dauth @@ -316,6 +344,7 @@ Charlie Drage Charlie Lewis Chase Bolt ChaYoung You +Chee Hau Lim Chen Chao Chen Chuanliang Chen Hanxiao @@ -325,6 +354,7 @@ Chen Qiu Cheng-mean Liu Chengfei Shang Chengguang Xu +Chenyang Yan chenyuzhu Chetan Birajdar Chewey @@ -339,6 +369,7 @@ Chris Fordham Chris Gavin Chris Gibson Chris Khoo +Chris Kreussling (Flatbush Gardener) Chris McKinnel Chris McKinnel Chris Price @@ -351,6 +382,7 @@ Chris Telfer Chris Wahl Chris Weyl Chris White +Christian Becker Christian Berendt Christian Brauner Christian Böhme @@ -359,6 +391,7 @@ Christian Persson Christian Rotzoll Christian Simon Christian Stefanescu +Christoph Ziebuhr Christophe Mehay Christophe Troestler Christophe Vidal @@ -372,7 +405,9 @@ Christy Norman Chun Chen Ciro S. Costa Clayton Coleman +Clint Armstrong Clinton Kitson +clubby789 Cody Roseborough Coenraad Loubser Colin Dunklau @@ -383,19 +418,23 @@ Colin Walters Collin Guarino Colm Hally companycy +Conor Evans Corbin Coleman Corey Farrell Cory Forsyth +Cory Snider cressie176 -CrimsonGlory Cristian Ariza Cristian Staretu cristiano balducci Cristina Yenyxe Gonzalez Garcia Cruceru Calin-Cristian CUI Wei +cuishuang +Cuong Manh Le Cyprian Gracz Cyril F +Da McGrady Daan van Berkel Daehyeok Mun Dafydd Crosby @@ -413,6 +452,7 @@ Dan Hirsch Dan Keder Dan Levy Dan McPherson +Dan Plamadeala Dan Stine Dan Williams Dani Hodovic @@ -433,6 +473,7 @@ Daniel Mizyrycki Daniel Nephin Daniel Norberg Daniel Nordberg +Daniel P. 
Berrangé Daniel Robinson Daniel S Daniel Sweet @@ -441,6 +482,7 @@ Daniel Watkins Daniel X Moore Daniel YC Lin Daniel Zhang +Daniele Rondina Danny Berger Danny Milosavljevic Danny Yates @@ -456,6 +498,7 @@ Dave Henderson Dave MacDonald Dave Tucker David Anderson +David Bellotti David Calavera David Chung David Corking @@ -470,9 +513,11 @@ David Lawrence David Lechner David M. Karr David Mackey +David Manouchehri David Mat David Mcanulty David McKay +David O'Rourke David P Hilton David Pelaez David R. Jenni @@ -503,14 +548,14 @@ Dennis Docter Derek Derek Derek Ch -Derek McGowan +Derek McGowan Deric Crago Deshi Xiao -devmeyster Devon Estes Devvyn Murphy Dharmit Shah Dhawal Yogesh Bhanushali +Dhilip Kumars Diego Romero Diego Siqueira Dieter Reuter @@ -522,9 +567,11 @@ Dimitris Rozakis Dimitry Andric Dinesh Subhraveti Ding Fei +dingwei Diogo Monica DiuDiugirl Djibril Koné +Djordje Lukic dkumor Dmitri Logvinenko Dmitri Shuralyov @@ -536,6 +583,8 @@ Dmitry Shyshkin Dmitry Smirnov Dmitry V. Krivenok Dmitry Vorobev +Dmytro Iakovliev +docker-unir[bot] Dolph Mathews Dominic Tubach Dominic Yin @@ -569,8 +618,9 @@ Eivind Uggedal Elan Ruusamäe Elango Sivanandam Elena Morozova -Eli Uriegas +Eli Uriegas Elias Faxö +Elias Koromilas Elias Probst Elijah Zupancic eluck @@ -580,6 +630,7 @@ Emil Hernvall Emily Maier Emily Rose Emir Ozer +Eng Zer Jun Enguerran Eohyung Lee epeterso @@ -588,6 +639,7 @@ Eric Curtin Eric G. Noriega Eric Hanchrow Eric Lee +Eric Mountain Eric Myhre Eric Paris Eric Rafaloff @@ -597,17 +649,21 @@ Eric Soderstrom Eric Yang Eric-Olivier Lamey Erica Windisch +Erich Cordoba Erik Bray Erik Dubbelboer Erik Hollensbe Erik Inge Bolsø Erik Kristensen +Erik Sipsma Erik St. Martin Erik Weathers Erno Hopearuoho Erwin van der Koogh +Espen Suenson Ethan Bell Ethan Mosbaugh +Euan Harris Euan Kemp Eugen Krizo Eugene Yakubovich @@ -657,6 +713,7 @@ Fengtu Wang Ferenc Szabo Fernando Fero Volar +Feroz Salam Ferran Rodenas Filipe Brandenburger Filipe Oliveira @@ -673,6 +730,7 @@ Florin Patan fonglh Foysal Iqbal Francesc Campoy +Francesco Degrassi Francesco Mari Francis Chuang Francisco Carriedo @@ -681,18 +739,23 @@ Frank Groeneveld Frank Herrmann Frank Macreery Frank Rosquin -frankyang +Frank Yang Fred Lifton Frederick F. Kautz IV +Frederico F. de Oliveira Frederik Loeffert Frederik Nordahl Jul Sabroe Freek Kalter Frieder Bluemle +frobnicaty <92033765+frobnicaty@users.noreply.github.com> +Frédéric Dalleau Fu JinLin Félix Baylac-Jacqué Félix Cantournet Gabe Rosenhouse Gabor Nagy +Gabriel Goller +Gabriel L. Somlo Gabriel Linder Gabriel Monroy Gabriel Nicolas Avellaneda @@ -707,12 +770,14 @@ Gaurav Singh Gaël PORTAY Genki Takiuchi GennadySpb +Geoff Levand Geoffrey Bachelet Geon Kim George Kontridze George MacRorie George Xie Georgi Hristozov +Georgy Yakovlev Gereon Frey German DZ Gert van Valkenhoef @@ -724,6 +789,7 @@ Gildas Cuisinier Giovan Isa Musthofa gissehel Giuseppe Mazzotta +Giuseppe Scrivano Gleb Fotengauer-Malinovskiy Gleb M Borisov Glyn Normington @@ -746,6 +812,8 @@ Guilhem Lettron Guilherme Salgado Guillaume Dufour Guillaume J. Charmes +Gunadhya S. 
<6939749+gunadhya@users.noreply.github.com> +Guoqiang QI guoxiuyan Guri Gurjeet Singh @@ -755,12 +823,13 @@ gwx296173 Günter Zöchbauer Haichao Yang haikuoliu +haining.cao Hakan Özler Hamish Hutchings Hannes Ljungberg Hans Kristian Flaatten Hans Rødtang -Hao Shu Wei +Hao Shu Wei Hao Zhang <21521210@zju.edu.cn> Harald Albers Harald Niesche @@ -792,15 +861,16 @@ Hu Tao HuanHuan Ye Huanzhong Zhang Huayi Zhang +Hugo Barrera Hugo Duncan Hugo Marisco <0x6875676f@gmail.com> +Hui Kang Hunter Blanks huqun Huu Nguyen -hyeongkyu.lee +Hyeongkyu Lee Hyzhou Zhy Iago López Galeiras -Ian Babrou Ian Bishop Ian Bull Ian Calvert @@ -817,6 +887,7 @@ Igor Dolzhikov Igor Karpovich Iliana Weller Ilkka Laukkanen +Illo Abdulrahim Ilya Dmitrichenko Ilya Gusev Ilya Khlopotov @@ -847,7 +918,8 @@ Jaivish Kothari Jake Champlin Jake Moshenko Jake Sanders -jakedt +Jakub Drahos +Jakub Guzik James Allen James Carey James Carr @@ -859,11 +931,14 @@ James Lal James Mills James Nesbitt James Nugent +James Sanders James Turnbull James Watkins-Harvey Jamie Hannaford Jamshid Afshar +Jan Breig Jan Chren +Jan Götte Jan Keromnes Jan Koprowski Jan Pazdziora @@ -876,7 +951,6 @@ Januar Wayong Jared Biel Jared Hocutt Jaroslaw Zabiello -jaseg Jasmine Hegman Jason A. Donenfeld Jason Divock @@ -891,10 +965,11 @@ Jason Shepherd Jason Smith Jason Sommer Jason Stangroome +Javier Bassi jaxgeller -Jay Jay Jay Kamat +Jay Lim Jean Rouge Jean-Baptiste Barth Jean-Baptiste Dalido @@ -912,12 +987,14 @@ Jeff Minard Jeff Nickoloff Jeff Silberman Jeff Welch +Jeff Zvier Jeffrey Bolle Jeffrey Morgan Jeffrey van Gogh Jenny Gebske Jeremy Chambers Jeremy Grosser +Jeremy Huntwork Jeremy Price Jeremy Qian Jeremy Unruh @@ -933,13 +1010,16 @@ Ji.Zhilong Jian Liao Jian Zhang Jiang Jinyang +Jianyong Wu Jie Luo Jie Ma Jihyun Hwang Jilles Oldenbeuving Jim Alateras +Jim Carroll Jim Ehrismann Jim Galasyn +Jim Lin Jim Minter Jim Perrin Jimmy Cuadra @@ -951,6 +1031,7 @@ Jiri Appl Jiri Popelka Jiuyue Ma Jiří Župka +Joakim Roubert Joao Fernandes Joao Trindade Joe Beda @@ -1012,6 +1093,7 @@ Joost Cassee Jordan Arentsen Jordan Jennings Jordan Sissel +Jordi Massaguer Pla Jorge Marin Jorit Kleine-Möllhoff Jose Diaz-Gonzalez @@ -1044,12 +1126,15 @@ Julien Pervillé Julien Pivotto Julio Guerra Julio Montes +Jun Du Jun-Ru Chang +junxu Jussi Nummelin Justas Brazauskas Justen Martin Justin Cormack Justin Force +Justin Keller <85903732+jk-vb@users.noreply.github.com> Justin Menga Justin Plock Justin Simonelis @@ -1062,6 +1147,7 @@ Jörg Thalheim K. Heller Kai Blin Kai Qiang Wu (Kennan) +Kaijie Chen Kamil Domański Kamjar Gerami Kanstantsin Shautsou @@ -1082,6 +1168,7 @@ Kawsar Saiyeed Kay Yan kayrus Kazuhiro Sera +Kazuyoshi Kato Ke Li Ke Xu Kei Ohmura @@ -1096,6 +1183,7 @@ Kenjiro Nakayama Kent Johnson Kenta Tada Kevin "qwazerty" Houdebert +Kevin Alvarez Kevin Burke Kevin Clark Kevin Feyrer @@ -1122,20 +1210,22 @@ knappe Kohei Tsuruta Koichi Shiraishi Konrad Kleine +Konrad Ponichtera Konstantin Gribov Konstantin L Konstantin Pelykh +Kostadin Plachkov Krasi Georgiev Krasimir Georgiev Kris-Mikael Krister Kristian Haugene Kristina Zabunova Krystian Wojcicki -Kun Zhang Kunal Kushwaha Kunal Tyagi Kyle Conroy Kyle Linden +Kyle Squizzato Kyle Wuolle kyu Lachlan Coote @@ -1151,20 +1241,25 @@ Lars R. 
Damerow Lars-Magnus Skog Laszlo Meszaros Laura Frank +Laurent Bernaille Laurent Erignoux Laurie Voss Leandro Siqueira +Lee Calcote Lee Chao <932819864@qq.com> Lee, Meng-Han -leeplay Lei Gong Lei Jitang +Leiiwang Len Weincier Lennie Leo Gallucci +Leonardo Nodari +Leonardo Taccari Leszek Kowalski Levi Blackstone Levi Gross +Levi Harrison Lewis Daly Lewis Marshall Lewis Peckover @@ -1173,11 +1268,12 @@ Liam Macgillavry Liana Lo Liang Mingqiang Liang-Chi Hsieh +liangwei Liao Qingwei Lifubang Lihua Tang Lily Guo -limsy +limeidan Lin Lu LingFaKe Linus Heckemann @@ -1207,6 +1303,7 @@ Lucas Chi Lucas Molas Lucas Silvestre Luciano Mores +Luis Henrique Mulinari Luis Martínez de Bartolomé Izquierdo Luiz Svoboda Lukas Heeren @@ -1222,7 +1319,7 @@ Ma Shimiao Mabin Madhan Raj Mookkandy Madhav Puri -Madhu Venugopal +Madhu Venugopal Mageee Mahesh Tiyyagura malnick @@ -1255,12 +1352,14 @@ Marius Gundersen Marius Sturm Marius Voila Mark Allen +Mark Feit Mark Jeromin Mark McGranaghan Mark McKinstry Mark Milstein Mark Oates Mark Parker +Mark Vainomaa Mark West Markan Patel Marko Mikulicic @@ -1269,11 +1368,14 @@ Markus Fix Markus Kortlang Martijn Dwars Martijn van Oosterhout +Martin Braun +Martin Dojcak Martin Honermeyer Martin Kelly Martin Mosegaard Amdisen Martin Muzatko Martin Redmond +Maru Newby Mary Anthony Masahito Zembutsu Masato Ohba @@ -1284,13 +1386,16 @@ Mathias Monnerville Mathieu Champlon Mathieu Le Marec - Pasquet Mathieu Parent +Mathieu Paturel Matt Apperson Matt Bachmann +Matt Bajor Matt Bentley Matt Haggard Matt Hoyle Matt McCormick Matt Moore +Matt Morrison <3maven@gmail.com> Matt Richardson Matt Rickard Matt Robenolt @@ -1305,12 +1410,14 @@ Matthew Riley Matthias Klumpp Matthias Kühnle Matthias Rampke +Matthieu Fronton Matthieu Hauglustaine Mattias Jernberg Mauricio Garavaglia mauriyouth Max Harmathy Max Shytikov +Max Timchenko Maxim Fedchyshyn Maxim Ivanov Maxim Kulkin @@ -1324,14 +1431,16 @@ Megan Kostick Mehul Kar Mei ChunTao Mengdi Gao +Menghui Chen Mert Yazıcıoğlu mgniu Micah Zoltu Michael A. 
Smith +Michael Beskin Michael Bridgen Michael Brown Michael Chiang -Michael Crosby +Michael Crosby Michael Currie Michael Friis Michael Gorsuch @@ -1340,6 +1449,7 @@ Michael Holzheu Michael Hudson-Doyle Michael Huettermann Michael Irwin +Michael Kuehn Michael Käufl Michael Neale Michael Nussbaum @@ -1349,23 +1459,29 @@ Michael Spetsiotis Michael Stapelberg Michael Steinert Michael Thies +Michael Weidmann Michael West Michael Zhao Michal Fojtik Michal Gebauer Michal Jemala +Michal Kostrzewa Michal Minář +Michal Rostecki Michal Wieczorek Michaël Pailloncy Michał Czeraszkiewicz Michał Gryko +Michał Kosek Michiel de Jong Mickaël Fortunato Mickaël Remars Miguel Angel Fernández Miguel Morales +Miguel Perez Mihai Borobocea Mihuleacc Sergiu +Mikael Davranche Mike Brown Mike Bush Mike Casas @@ -1384,6 +1500,7 @@ Mike Snitzer mikelinjie <294893458@qq.com> Mikhail Sobolev Miklos Szegedi +Milas Bowman Milind Chawre Miloslav Trmač mingqing @@ -1392,7 +1509,7 @@ Misty Stanley-Jones Mitch Capper Mizuki Urushida mlarcher -Mohammad Banikazemi +Mohammad Banikazemi Mohammad Nasirifar Mohammed Aaqib Ansari Mohit Soni @@ -1406,6 +1523,7 @@ Moysés Borges mrfly Mrunal Patel Muayyad Alsadi +Muhammad Zohaib Aslam Mustafa Akın Muthukumar R Máximo Cuadros @@ -1422,6 +1540,8 @@ Natasha Jarus Nate Brennand Nate Eagleson Nate Jones +Nathan Carlson +Nathan Herald Nathan Hsieh Nathan Kleyn Nathan LeClaire @@ -1445,6 +1565,7 @@ Nick Payne Nick Russo Nick Stenning Nick Stinemates +Nick Wood NickrenREN Nicola Kabar Nicolas Borboën @@ -1455,6 +1576,7 @@ Nicolas Kaiser Nicolas Sterchele Nicolas V Castet Nicolás Hock Isaza +Niel Drummond Nigel Poulton Nik Nyby Nikhil Chawla @@ -1472,6 +1594,7 @@ noducks Nolan Darilek Noriki Nakamura nponeccop +Nurahmadie Nuutti Kotivuori nzwsch O.S. Tezer @@ -1489,7 +1612,9 @@ Olle Jonsson Olli Janatuinen Olly Pomeroy Omri Shiv +Onur Filiz Oriol Francès +Oscar Bonilla <6f6231@gmail.com> Oskar Niburski Otto Kekäläinen Ouyang Liduo @@ -1502,10 +1627,12 @@ Pascal Borreli Pascal Hartig Patrick Böänziger Patrick Devine +Patrick Haas Patrick Hemmer Patrick Stapleton Patrik Cyvoct pattichen +Paul "TBBle" Hampson Paul paul Paul Annesley @@ -1520,6 +1647,7 @@ Paul Liljenberg Paul Morie Paul Nasrat Paul Weaver +Paulo Gomes Paulo Ribeiro Pavel Lobashov Pavel Matěja @@ -1530,6 +1658,7 @@ Pavel Tikhomirov Pavlos Ratis Pavol Vargovcik Pawel Konczalski +Paweł Gronowski Peeyush Gupta Peggy Li Pei Su @@ -1537,6 +1666,7 @@ Peng Tao Penghan Wang Per Weijnitz perhapszzy@sina.com +Pete Woods Peter Bourgon Peter Braden Peter Bücker @@ -1552,8 +1682,10 @@ Peter Salvatore Peter Volpe Peter Waller Petr Švihlík +Petros Angelatos Phil -Phil Estes +Phil Estes +Phil Sphicas Phil Spitler Philip Alexander Etling Philip Monroe @@ -1570,21 +1702,25 @@ Pierre Dal-Pra Pierre Wacrenier Pierre-Alain RIVIERE Piotr Bogdan -pixelistik +Piotr Karbowski Porjo Poul Kjeldager Sørensen Pradeep Chhetri Pradip Dhara +Pradipta Kr. 
Banerjee Prasanna Gautam Pratik Karki Prayag Verma Priya Wadhwa Projjol Banerji Przemek Hejman +Puneet Pruthi Pure White pysqz Qiang Huang +Qin TianHuan Qinglan Peng +Quan Tian qudongfang Quentin Brossard Quentin Perez @@ -1607,6 +1743,7 @@ Ramon van Alteren RaviTeja Pothana Ray Tsang ReadmeCritic +realityone Recursive Madman Reficul Regan McCooey @@ -1617,9 +1754,9 @@ Renaud Gaubert Rhys Hiltner Ri Xu Ricardo N Feliciano +Rich Horwood Rich Moyse Rich Seymour -Richard Richard Burnison Richard Harvey Richard Mathie @@ -1634,12 +1771,14 @@ Riku Voipio Riley Guerin Ritesh H Shukla Riyaz Faizullabhoy +Rob Cowsill <42620235+rcowsill@users.noreply.github.com> Rob Gulewich Rob Vesse Robert Bachmann Robert Bittle Robert Obryk Robert Schneider +Robert Shade Robert Stern Robert Terhaar Robert Wallis @@ -1652,6 +1791,7 @@ Robin Speekenbrink Robin Thoni robpc Rodolfo Carvalho +Rodrigo Campos Rodrigo Vaz Roel Van Nyen Roger Peppe @@ -1666,11 +1806,14 @@ Roma Sokolov Roman Dudin Roman Mazur Roman Strashkin +Roman Volosatovs +Roman Zabaluev Ron Smits Ron Williams Rong Gao Rong Zhang Rongxiang Song +Rony Weng root root root @@ -1690,13 +1833,16 @@ Russ Magee Ryan Abrams Ryan Anderson Ryan Aslett +Ryan Barry Ryan Belgrave +Ryan Campbell Ryan Detzel Ryan Fowler Ryan Liu Ryan McLaughlin Ryan O'Donnell Ryan Seto +Ryan Shea Ryan Simmen Ryan Stelly Ryan Thomas @@ -1706,9 +1852,9 @@ Ryan Zhang ryancooper7 RyanDeng Ryo Nakao +Ryoga Saito Rémy Greinhofer s. rannou -s00318865 Sabin Basyal Sachin Joshi Sagar Hani @@ -1728,8 +1874,9 @@ Sambuddha Basu Sami Wagiaalla Samuel Andaya Samuel Dion-Girardeau -Samuel Karp +Samuel Karp Samuel PHAN +sanchayanghosh Sandeep Bansal Sankar சங்கர் Sanket Saurav @@ -1745,6 +1892,7 @@ Satoshi Tagomori Scott Bessler Scott Collier Scott Johnston +Scott Percival Scott Stamp Scott Walls sdreyesg @@ -1757,6 +1905,9 @@ Sean P. Kane Sean Rodman Sebastiaan van Steenis Sebastiaan van Stijn +Sebastian Höffner +Sebastian Radloff +Sebastien Goasguen Senthil Kumar Selvaraj Senthil Kumaran SeongJae Park @@ -1776,12 +1927,15 @@ shaunol Shawn Landden Shawn Siefkas shawnhe +Shayan Pooya Shayne Wang Shekhar Gulati Sheng Yang Shengbo Song +Shengjing Zhu Shev Yan Shih-Yuan Lee +Shihao Xia Shijiang Wei Shijun Qin Shishir Mahajan @@ -1790,14 +1944,13 @@ Shourya Sarcar Shu-Wai Chow shuai-z Shukui Yang -Shuwei Hao Sian Lerk Lau +Siarhei Rasiukevich Sidhartha Mani sidharthamani Silas Sewell Silvan Jegen Simão Reis -Simei He Simon Barendse Simon Eskildsen Simon Ferquel @@ -1808,13 +1961,16 @@ Simon Vikstrom Sindhu S Sjoerd Langkemper skanehira +Smark Meng Solganik Alexander Solomon Hykes Song Gao Soshi Katsuta +Sotiris Salloumis Soulou Spencer Brown Spencer Smith +Spike Curtis Sridatta Thatipamala Sridhar Ratnakumar Srini Brahmaroutu @@ -1830,6 +1986,7 @@ Stefan S. Stefan Scherer Stefan Staudenmeyer Stefan Weil +Steffen Butzer Stephan Spindler Stephen Benjamin Stephen Crosby @@ -1848,7 +2005,9 @@ Steven Iveson Steven Merrill Steven Richards Steven Taylor +Stéphane Este-Gracias Stig Larsson +Su Wang Subhajit Ghosh Sujith Haridasan Sun Gengze <690388648@qq.com> @@ -1858,15 +2017,16 @@ Sunny Gogoi Suryakumar Sudar Sven Dowideit Swapnil Daingade -Sylvain Baubeau +Sylvain Baubeau Sylvain Bellemare Sébastien Sébastien HOUZÉ Sébastien Luttringer Sébastien Stormacq +Sören Tempel Tabakhase Tadej Janež -TAGOMORI Satoshi +Takuto Sato tang0th Tangi Colin Tatsuki Sugiura @@ -1877,18 +2037,21 @@ Ted M. 
Young Tehmasp Chaudhri Tejaswini Duggaraju Tejesh Mehta +Terry Chu terryding77 <550147740@qq.com> -tgic Thatcher Peskens theadactyl Thell 'Bo' Fowler Thermionix +Thiago Alves Silva Thijs Terlouw Thomas Bikeev Thomas Frössman Thomas Gazagnaire +Thomas Graf Thomas Grainger Thomas Hansen +Thomas Ledos Thomas Leonard Thomas Léveil Thomas Orozco @@ -1899,11 +2062,13 @@ Thomas Swift Thomas Tanaka Thomas Texier Ti Zhou +Tiago Seabra Tianon Gravi Tianyi Wang Tibor Vass Tiffany Jernigan Tiffany Low +Till Claassen Till Wegmüller Tim Tim Bart @@ -1915,11 +2080,14 @@ Tim Potter Tim Ruffles Tim Smith Tim Terhorst +Tim Wagner Tim Wang Tim Waugh Tim Wraight Tim Zju <21651152@zju.edu.cn> +timchenxiaoyu <837829664@qq.com> timfeirg +Timo Rothenpieler Timothy Hobbs tjwebb123 tobe @@ -1928,6 +2096,7 @@ Tobias Bradtke Tobias Gesellchen Tobias Klauser Tobias Munk +Tobias Pfandzelter Tobias Schmidt Tobias Schwab Todd Crane @@ -1941,25 +2110,33 @@ Tom Fotherby Tom Howe Tom Hulihan Tom Maaswinkel +Tom Parker Tom Sweeney Tom Wilkie Tom X. Tobin +Tom Zhao +Tomas Janousek +Tomas Kral Tomas Tomecek Tomasz Kopczynski Tomasz Lipinski Tomasz Nurkiewicz +Tomek Mańko Tommaso Visconti +Tomoya Tabuchi Tomáš Hrčka +tonic Tonny Xu Tony Abboud Tony Daws Tony Miller toogley Torstein Husebø +Toshiaki Makita Tõnis Tiigi Trace Andreason tracylihui <793912329@qq.com> -Trapier Marshall +Trapier Marshall Travis Cline Travis Thieman Trent Ogren @@ -1969,6 +2146,8 @@ Trevor Sullivan Trishna Guha Tristan Carel Troy Denton +Tudor Brindus +Ty Alexander Tycho Andersen Tyler Brock Tyler Brown @@ -1979,6 +2158,7 @@ Umesh Yadav Utz Bacher vagrant Vaidas Jablonskis +Valentin Kulesh vanderliang Velko Ivanov Veres Lajos @@ -1992,12 +2172,13 @@ Victor Palma Victor Vieux Victoria Bialas Vijaya Kumar K +Vikas Choudhary Vikram bir Singh Viktor Stanchev Viktor Vojnovski VinayRaghavanKS Vincent Batts -Vincent Bernat +Vincent Bernat Vincent Boulineau Vincent Demeester Vincent Giersch @@ -2017,9 +2198,9 @@ Vladimir Pouzanov Vladimir Rutsky Vladimir Varankin VladimirAus +Vladislav Kolesnikov Vlastimil Zeman Vojtech Vitek (V-Teq) -waitingkuo Walter Leibbrandt Walter Stanish Wang Chao @@ -2034,6 +2215,7 @@ wanghuaiqing Ward Vandewege WarheadsSE Wassim Dhif +Wataru Ishida Wayne Chang Wayne Song Weerasak Chongnguluam @@ -2048,7 +2230,6 @@ Wendel Fleming Wenjun Tang Wenkai Yin wenlxie -Wentao Zhang Wenxuan Zhao Wenyu You <21551128@zju.edu.cn> Wenzhi Liang @@ -2068,16 +2249,22 @@ William Thurston Wilson Júnior Wing-Kam Wong WiseTrem +Wolfgang Nagele Wolfgang Powisch Wonjun Kim +WuLonghui xamyzhao +Xia Wu Xian Chaobo Xianglin Gao +Xianjie Xianlu Bird Xiao YongBiao +Xiao Zhang XiaoBing Jiang Xiaodong Liu Xiaodong Zhang +Xiaohua Ding Xiaoxi He Xiaoxu Chen Xiaoyu Zhang @@ -2092,12 +2279,16 @@ Xuecong Liao xuzhaokui Yadnyawalkya Tale Yahya +yalpul YAMADA Tsuyoshi Yamasaki Masahide Yan Feng +Yan Zhu Yang Bai +Yang Li Yang Pengfei yangchenliang +Yann Autissier Yanqiang Miao Yao Zaiyong Yash Murty @@ -2117,6 +2308,7 @@ Yosef Fertel You-Sheng Yang (楊有勝) youcai Youcef YEKHLEF +Youfu Zhang Yu Changchun Yu Chengxia Yu Peng @@ -2124,14 +2316,18 @@ Yu-Ju Hong Yuan Sun Yuanhong Peng Yue Zhang +Yufei Xiong Yuhao Fang Yuichiro Kaneko +YujiOshima Yunxiang Huang Yurii Rashkovskii Yusuf Tarık Günaydın +Yves Blusseau <90z7oey02@sneakemail.com> Yves Junqueira Zac Dover Zach Borboa +Zach Gershman Zachary Jaffee Zain Memon Zaiste! 
@@ -2147,6 +2343,7 @@ Zhenan Ye <21551168@zju.edu.cn> zhenghenghuo Zhenhai Gao Zhenkun Bi +ZhiPeng Lu zhipengzuo Zhou Hao Zhoulin Xie @@ -2164,7 +2361,6 @@ Zou Yu zqh Zuhayr Elahi Zunayed Ali -Álex González Álvaro Lázaro Átila Camurça Alves 尹吉峰 @@ -2173,3 +2369,4 @@ Zunayed Ali 慕陶 搏通 黄艳红00139573 +정재영 diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go index 1565e2af64..bee9b875a8 100644 --- a/vendor/github.com/docker/docker/api/common.go +++ b/vendor/github.com/docker/docker/api/common.go @@ -3,7 +3,7 @@ package api // import "github.com/docker/docker/api" // Common constants for daemon and client. const ( // DefaultVersion of Current REST API - DefaultVersion = "1.41" + DefaultVersion = "1.42" // NoBaseImageSpecifier is the symbol used by the FROM // command to specify that no base image is to be used. diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml index c24f57bc9a..afe7a8c371 100644 --- a/vendor/github.com/docker/docker/api/swagger.yaml +++ b/vendor/github.com/docker/docker/api/swagger.yaml @@ -19,10 +19,10 @@ produces: consumes: - "application/json" - "text/plain" -basePath: "/v1.41" +basePath: "/v1.42" info: title: "Docker Engine API" - version: "1.41" + version: "1.42" x-logo: url: "https://docs.docker.com/assets/images/logo-docker-main.png" description: | @@ -55,8 +55,8 @@ info: the URL is not supported by the daemon, a HTTP `400 Bad Request` error message is returned. - If you omit the version-prefix, the current version of the API (v1.41) is used. - For example, calling `/info` is the same as calling `/v1.41/info`. Using the + If you omit the version-prefix, the current version of the API (v1.42) is used. + For example, calling `/info` is the same as calling `/v1.42/info`. Using the API without a version-prefix is deprecated and will be removed in a future release. Engine releases in the near future should support this version of the API, @@ -202,24 +202,74 @@ definitions: MountPoint: type: "object" - description: "A mount point inside a container" + description: | + MountPoint represents a mount point configuration inside the container. + This is used for reporting the mountpoints in use by a container. properties: Type: + description: | + The mount type: + + - `bind` a mount of a file or directory from the host into the container. + - `volume` a docker volume with the given `Name`. + - `tmpfs` a `tmpfs`. + - `npipe` a named pipe from the host into the container. + - `cluster` a Swarm cluster volume type: "string" + enum: + - "bind" + - "volume" + - "tmpfs" + - "npipe" + - "cluster" + example: "volume" Name: + description: | + Name is the name reference to the underlying data defined by `Source` + e.g., the volume name. type: "string" + example: "myvolume" Source: + description: | + Source location of the mount. + + For volumes, this contains the storage location of the volume (within + `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains + the source (host) part of the bind-mount. For `tmpfs` mount points, this + field is empty. type: "string" + example: "/var/lib/docker/volumes/myvolume/_data" Destination: + description: | + Destination is the path relative to the container root (`/`) where + the `Source` is mounted inside the container. 
type: "string" + example: "/usr/share/nginx/html/" Driver: + description: | + Driver is the volume driver used to create the volume (if it is a volume). type: "string" + example: "local" Mode: + description: | + Mode is a comma separated list of options supplied by the user when + creating the bind/volume mount. + + The default is platform-specific (`"z"` on Linux, empty on Windows). type: "string" + example: "z" RW: + description: | + Whether the mount is mounted writable (read-write). type: "boolean" + example: true Propagation: + description: | + Propagation describes how mounts are propagated from the host into the + mount point, and vice-versa. Refer to the [Linux kernel documentation](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt) + for details. This field is not used on Windows. type: "string" + example: "" DeviceMapping: type: "object" @@ -302,12 +352,14 @@ definitions: - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. + - `cluster` a Swarm cluster volume type: "string" enum: - "bind" - "volume" - "tmpfs" - "npipe" + - "cluster" ReadOnly: description: "Whether the mount should be read-only." type: "boolean" @@ -332,6 +384,10 @@ definitions: description: "Disable recursive bind mount." type: "boolean" default: false + CreateMountpoint: + description: "Create mount point on host if missing" + type: "boolean" + default: false VolumeOptions: description: "Optional configuration for the `volume` type." type: "object" @@ -382,11 +438,13 @@ definitions: type: "string" description: | - Empty string means not to restart + - `no` Do not automatically restart - `always` Always restart - `unless-stopped` Restart always except when the user has manually stopped the container - `on-failure` Restart only when the container exit code is non-zero enum: - "" + - "no" - "always" - "unless-stopped" - "on-failure" @@ -527,19 +585,13 @@ definitions: type: "array" items: $ref: "#/definitions/DeviceRequest" - KernelMemory: + KernelMemoryTCP: description: | - Kernel memory limit in bytes. - -
+ Hard limit for kernel TCP buffer memory (in bytes). Depending on the + OCI runtime in use, this option may be ignored. It is no longer supported + by the default (runc) runtime. - > **Deprecated**: This field is deprecated as the kernel 5.4 deprecated - > `kmem.limit_in_bytes`. - type: "integer" - format: "int64" - example: 209715200 - KernelMemoryTCP: - description: "Hard limit for kernel TCP buffer memory (in bytes)." + This field is omitted when empty. type: "integer" format: "int64" MemoryReservation: @@ -723,11 +775,13 @@ definitions: The time to wait between checks in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit. type: "integer" + format: "int64" Timeout: description: | The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). 0 means inherit. type: "integer" + format: "int64" Retries: description: | The number of consecutive failures needed to consider a container as @@ -739,11 +793,13 @@ definitions: health-retries countdown in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit. type: "integer" + format: "int64" Health: description: | Health stores information about the container's healthcheck results. type: "object" + x-nullable: true properties: Status: description: | @@ -769,13 +825,13 @@ definitions: description: | Log contains the last few results (oldest first) items: - x-nullable: true $ref: "#/definitions/HealthcheckResult" HealthcheckResult: description: | HealthcheckResult stores information about a single run of a healthcheck probe type: "object" + x-nullable: true properties: Start: description: | @@ -910,6 +966,16 @@ definitions: type: "array" items: $ref: "#/definitions/Mount" + ConsoleSize: + type: "array" + description: | + Initial console size, as an `[height, width]` array. + x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 # Applicable to UNIX platforms CapAdd: @@ -1024,8 +1090,9 @@ definitions: description: "Mount the container's root filesystem as read only." SecurityOpt: type: "array" - description: "A list of string values to customize labels for MLS - systems, such as SELinux." + description: | + A list of string values to customize labels for MLS systems, such + as SELinux. items: type: "string" StorageOpt: @@ -1073,15 +1140,6 @@ definitions: type: "string" description: "Runtime to use with this container." # Applicable to Windows - ConsoleSize: - type: "array" - description: | - Initial console size, as an `[height, width]` array. (Windows only) - minItems: 2 - maxItems: 2 - items: - type: "integer" - minimum: 0 Isolation: type: "string" description: | @@ -1106,14 +1164,25 @@ definitions: type: "string" ContainerConfig: - description: "Configuration for a container that is portable between hosts" + description: | + Configuration for a container that is portable between hosts. + + When used as `ContainerConfig` field in an image, `ContainerConfig` is an + optional field containing the configuration of the container that was last + committed when creating the image. + + Previous versions of Docker builder used this field to store build cache, + and it is not in active use anymore. type: "object" properties: Hostname: - description: "The hostname to use for the container, as a valid RFC 1123 hostname." + description: | + The hostname to use for the container, as a valid RFC 1123 hostname. type: "string" + example: "439f4e91bd1d" Domainname: - description: "The domain name to use for the container." 
+ description: | + The domain name to use for the container. type: "string" User: description: "The user that commands are run as inside the container." @@ -1136,11 +1205,16 @@ definitions: `{"/": {}}` type: "object" + x-nullable: true additionalProperties: type: "object" enum: - {} default: {} + example: { + "80/tcp": {}, + "443/tcp": {} + } Tty: description: | Attach standard streams to a TTY, including `stdin` if it is not closed. @@ -1162,21 +1236,29 @@ definitions: type: "array" items: type: "string" + example: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" Cmd: description: | Command to run specified as a string or an array of strings. type: "array" items: type: "string" + example: ["/bin/sh"] Healthcheck: $ref: "#/definitions/HealthConfig" ArgsEscaped: description: "Command is already escaped (Windows only)" type: "boolean" + default: false + example: false + x-nullable: true Image: description: | - The name of the image to use when creating the container/ + The name (or reference) of the image to use when creating the container, + or which was used when the container was created. type: "string" + example: "example-image:1.0" Volumes: description: | An object mapping mount point paths inside the container to empty @@ -1190,6 +1272,7 @@ definitions: WorkingDir: description: "The working directory for commands to run in." type: "string" + example: "/public/" Entrypoint: description: | The entry point for the container as a string or an array of strings. @@ -1200,38 +1283,50 @@ definitions: type: "array" items: type: "string" + example: [] NetworkDisabled: description: "Disable networking for the container." type: "boolean" + x-nullable: true MacAddress: description: "MAC address of the container." type: "string" + x-nullable: true OnBuild: description: | `ONBUILD` metadata that were defined in the image's `Dockerfile`. type: "array" + x-nullable: true items: type: "string" + example: [] Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" StopSignal: description: | Signal to stop a container as a string or unsigned integer. type: "string" - default: "SIGTERM" + example: "SIGTERM" + x-nullable: true StopTimeout: description: "Timeout to stop a container in seconds." type: "integer" default: 10 + x-nullable: true Shell: description: | Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. type: "array" + x-nullable: true items: type: "string" + example: ["/bin/sh", "-c"] NetworkingConfig: description: | @@ -1274,7 +1369,7 @@ definitions: type: "object" properties: Bridge: - description: Name of the network'a bridge (for example, `docker0`). + description: Name of the network's bridge (for example, `docker0`). type: "string" example: "docker0" SandboxID: @@ -1488,107 +1583,215 @@ definitions: example: "4443" GraphDriverData: - description: "Information about a container's graph driver." + description: | + Information about the storage driver used to store the container's and + image's filesystem. type: "object" required: [Name, Data] properties: Name: + description: "Name of the storage driver." type: "string" x-nullable: false + example: "overlay2" Data: + description: | + Low-level storage metadata, provided as key/value pairs. + + This information is driver-specific, and depends on the storage-driver + in use, and should be used for informational purposes only. 
type: "object" x-nullable: false additionalProperties: type: "string" + example: { + "MergedDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/merged", + "UpperDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/diff", + "WorkDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/work" + } - Image: + ImageInspect: + description: | + Information about an image in the local image cache. type: "object" - required: - - Id - - Parent - - Comment - - Created - - Container - - DockerVersion - - Author - - Architecture - - Os - - Size - - VirtualSize - - GraphDriver - - RootFS properties: Id: + description: | + ID is the content-addressable ID of an image. + + This identifier is a content-addressable digest calculated from the + image's configuration (which includes the digests of layers used by + the image). + + Note that this digest differs from the `RepoDigests` below, which + holds digests of image manifests that reference the image. type: "string" x-nullable: false + example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" RepoTags: + description: | + List of image names/tags in the local image cache that reference this + image. + + Multiple image tags can refer to the same image, and this list may be + empty if no tags reference the image, in which case the image is + "untagged", in which case it can still be referenced by its ID. type: "array" items: type: "string" + example: + - "example:1.0" + - "example:latest" + - "example:stable" + - "internal.registry.example.com:5000/example:1.0" RepoDigests: + description: | + List of content-addressable digests of locally available image manifests + that the image is referenced from. Multiple manifests can refer to the + same image. + + These digests are usually only available if the image was either pulled + from a registry, or if the image was pushed to a registry, which is when + the manifest is generated and its digest calculated. type: "array" items: type: "string" + example: + - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb" + - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" Parent: + description: | + ID of the parent image. + + Depending on how the image was created, this field may be empty and + is only set for images that were built/created locally. This field + is empty if the image was pulled from an image registry. type: "string" x-nullable: false + example: "" Comment: + description: | + Optional message that was set when committing or importing the image. type: "string" x-nullable: false + example: "" Created: + description: | + Date and time at which the image was created, formatted in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" x-nullable: false + example: "2022-02-04T21:20:12.497794809Z" Container: + description: | + The ID of the container that was used to create the image. + + Depending on how the image was created, this field may be empty. type: "string" x-nullable: false + example: "65974bc86f1770ae4bff79f651ebdbce166ae9aada632ee3fa9af3a264911735" ContainerConfig: $ref: "#/definitions/ContainerConfig" DockerVersion: + description: | + The version of Docker that was used to build the image. + + Depending on how the image was created, this field may be empty. 
type: "string" x-nullable: false + example: "20.10.7" Author: + description: | + Name of the author that was specified when committing the image, or as + specified through MAINTAINER (deprecated) in the Dockerfile. type: "string" x-nullable: false + example: "" Config: $ref: "#/definitions/ContainerConfig" Architecture: + description: | + Hardware CPU architecture that the image runs on. type: "string" x-nullable: false + example: "arm" + Variant: + description: | + CPU architecture variant (presently ARM-only). + type: "string" + x-nullable: true + example: "v7" Os: + description: | + Operating System the image is built to run on. type: "string" x-nullable: false + example: "linux" OsVersion: + description: | + Operating System version the image is built to run on (especially + for Windows). type: "string" + example: "" + x-nullable: true Size: + description: | + Total size of the image including all layers it is composed of. type: "integer" format: "int64" x-nullable: false + example: 1239828 VirtualSize: + description: | + Total size of the image including all layers it is composed of. + + In versions of Docker before v1.10, this field was calculated from + the image itself and all of its parent images. Docker v1.10 and up + store images self-contained, and no longer use a parent-chain, making + this field an equivalent of the Size field. + + This field is kept for backward compatibility, but may be removed in + a future version of the API. type: "integer" format: "int64" x-nullable: false + example: 1239828 GraphDriver: $ref: "#/definitions/GraphDriverData" RootFS: + description: | + Information about the image's RootFS, including the layer IDs. type: "object" required: [Type] properties: Type: type: "string" x-nullable: false + example: "layers" Layers: type: "array" items: type: "string" - BaseLayer: - type: "string" + example: + - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" + - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" Metadata: + description: | + Additional metadata of the image in the local cache. This information + is local to the daemon, and not part of the image itself. type: "object" properties: LastTagTime: + description: | + Date and time at which the image was last tagged in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + + This information is only available if the image was tagged locally, + and omitted otherwise. type: "string" format: "dateTime" - + example: "2022-02-28T14:40:02.623929178Z" + x-nullable: true ImageSummary: type: "object" required: @@ -1604,41 +1807,120 @@ definitions: - Containers properties: Id: + description: | + ID is the content-addressable ID of an image. + + This identifier is a content-addressable digest calculated from the + image's configuration (which includes the digests of layers used by + the image). + + Note that this digest differs from the `RepoDigests` below, which + holds digests of image manifests that reference the image. type: "string" x-nullable: false + example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" ParentId: + description: | + ID of the parent image. + + Depending on how the image was created, this field may be empty and + is only set for images that were built/created locally. This field + is empty if the image was pulled from an image registry. type: "string" x-nullable: false + example: "" RepoTags: + description: | + List of image names/tags in the local image cache that reference this + image. 
+ + Multiple image tags can refer to the same image, and this list may be + empty if no tags reference the image, in which case the image is + "untagged", in which case it can still be referenced by its ID. type: "array" x-nullable: false items: type: "string" + example: + - "example:1.0" + - "example:latest" + - "example:stable" + - "internal.registry.example.com:5000/example:1.0" RepoDigests: + description: | + List of content-addressable digests of locally available image manifests + that the image is referenced from. Multiple manifests can refer to the + same image. + + These digests are usually only available if the image was either pulled + from a registry, or if the image was pushed to a registry, which is when + the manifest is generated and its digest calculated. type: "array" x-nullable: false items: type: "string" + example: + - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb" + - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" Created: + description: | + Date and time at which the image was created as a Unix timestamp + (number of seconds sinds EPOCH). type: "integer" x-nullable: false + example: "1644009612" Size: + description: | + Total size of the image including all layers it is composed of. type: "integer" + format: "int64" x-nullable: false + example: 172064416 SharedSize: + description: | + Total size of image layers that are shared between this image and other + images. + + This size is not calculated by default. `-1` indicates that the value + has not been set / calculated. type: "integer" + format: "int64" x-nullable: false + example: 1239828 VirtualSize: + description: | + Total size of the image including all layers it is composed of. + + In versions of Docker before v1.10, this field was calculated from + the image itself and all of its parent images. Docker v1.10 and up + store images self-contained, and no longer use a parent-chain, making + this field an equivalent of the Size field. + + This field is kept for backward compatibility, but may be removed in + a future version of the API. type: "integer" + format: "int64" x-nullable: false + example: 172064416 Labels: + description: "User-defined key/value metadata." type: "object" x-nullable: false additionalProperties: type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" Containers: + description: | + Number of containers using this image. Includes both stopped and running + containers. + + This size is not calculated by default, and depends on which API endpoint + is used. `-1` indicates that the value has not been set / calculated. x-nullable: false type: "integer" + example: 2 AuthConfig: type: "object" @@ -1680,18 +1962,22 @@ definitions: type: "string" description: "Name of the volume." x-nullable: false + example: "tardis" Driver: type: "string" description: "Name of the volume driver used by the volume." x-nullable: false + example: "custom" Mountpoint: type: "string" description: "Mount path of the volume on the host." x-nullable: false + example: "/var/lib/docker/volumes/tardis" CreatedAt: type: "string" format: "dateTime" description: "Date/Time the volume was created." + example: "2016-06-07T20:31:11.853781916Z" Status: type: "object" description: | @@ -1703,12 +1989,17 @@ definitions: does not support this feature. 
additionalProperties: type: "object" + example: + hello: "world" Labels: type: "object" description: "User-defined key/value metadata." x-nullable: false additionalProperties: type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" Scope: type: "string" description: | @@ -1717,15 +2008,23 @@ definitions: default: "local" x-nullable: false enum: ["local", "global"] + example: "local" + ClusterVolume: + $ref: "#/definitions/ClusterVolume" Options: type: "object" description: | The driver specific options used when creating the volume. additionalProperties: type: "string" + example: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" UsageData: type: "object" x-nullable: true + x-go-name: "UsageData" required: [Size, RefCount] description: | Usage details about the volume. This information is used by the @@ -1733,6 +2032,7 @@ definitions: properties: Size: type: "integer" + format: "int64" default: -1 description: | Amount of disk space used by the volume (in bytes). This information @@ -1742,23 +2042,71 @@ definitions: x-nullable: false RefCount: type: "integer" + format: "int64" default: -1 description: | The number of containers referencing this volume. This field is set to `-1` if the reference-count is not available. x-nullable: false - example: - Name: "tardis" - Driver: "custom" - Mountpoint: "/var/lib/docker/volumes/tardis" - Status: - hello: "world" + VolumeCreateOptions: + description: "Volume configuration" + type: "object" + title: "VolumeConfig" + x-go-name: "CreateOptions" + properties: + Name: + description: | + The new volume's name. If not specified, Docker generates a name. + type: "string" + x-nullable: false + example: "tardis" + Driver: + description: "Name of the volume driver to use." + type: "string" + default: "local" + x-nullable: false + example: "custom" + DriverOpts: + description: | + A mapping of driver options and values. These options are + passed directly to the driver and are driver specific. + type: "object" + additionalProperties: + type: "string" + example: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" Labels: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - Scope: "local" - CreatedAt: "2016-06-07T20:31:11.853781916Z" + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + ClusterVolumeSpec: + $ref: "#/definitions/ClusterVolumeSpec" + + VolumeListResponse: + type: "object" + title: "VolumeListResponse" + x-go-name: "ListResponse" + description: "Volume list response" + properties: + Volumes: + type: "array" + description: "List of volumes" + items: + $ref: "#/definitions/Volume" + Warnings: + type: "array" + description: | + Warnings that occurred when fetching the list of volumes. + items: + type: "string" + example: [] Network: type: "object" @@ -1846,15 +2194,27 @@ definitions: ``` type: "array" items: - type: "object" - additionalProperties: - type: "string" + $ref: "#/definitions/IPAMConfig" Options: description: "Driver-specific options, specified as a map." 
type: "object" additionalProperties: type: "string" + IPAMConfig: + type: "object" + properties: + Subnet: + type: "string" + IPRange: + type: "string" + Gateway: + type: "string" + AuxiliaryAddresses: + type: "object" + additionalProperties: + type: "string" + NetworkContainer: type: "object" properties: @@ -1902,8 +2262,19 @@ definitions: Parent: description: | ID of the parent build cache record. + + > **Deprecated**: This field is deprecated, and omitted if empty. type: "string" - example: "hw53o5aio51xtltp5xjp8v7fx" + x-nullable: true + example: "" + Parents: + description: | + List of parent build cache record IDs. + type: "array" + items: + type: "string" + x-nullable: true + example: ["hw53o5aio51xtltp5xjp8v7fx"] Type: type: "string" description: | @@ -1972,6 +2343,8 @@ definitions: type: "string" error: type: "string" + errorDetail: + $ref: "#/definitions/ErrorDetail" status: type: "string" progress: @@ -2218,6 +2591,25 @@ definitions: type: "string" x-nullable: false + PluginPrivilege: + description: | + Describes a permission the user has to accept upon installing + the plugin. + type: "object" + x-go-name: "PluginPrivilege" + properties: + Name: + type: "string" + example: "network" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - "host" + Plugin: description: "A plugin for the Engine API" type: "object" @@ -3000,19 +3392,7 @@ definitions: PluginPrivilege: type: "array" items: - description: | - Describes a permission accepted by the user upon installing the - plugin. - type: "object" - properties: - Name: - type: "string" - Description: - type: "string" - Value: - type: "array" - items: - type: "string" + $ref: "#/definitions/PluginPrivilege" ContainerSpec: type: "object" description: | @@ -3661,6 +4041,7 @@ definitions: ServiceSpec: description: "User modifiable configuration for a service." + type: object properties: Name: description: "Name of the service." 
@@ -4052,73 +4433,71 @@ definitions: Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" ContainerSummary: - type: "array" - items: - type: "object" - properties: - Id: - description: "The ID of this container" + type: "object" + properties: + Id: + description: "The ID of this container" + type: "string" + x-go-name: "ID" + Names: + description: "The names that this container has been given" + type: "array" + items: type: "string" - x-go-name: "ID" - Names: - description: "The names that this container has been given" - type: "array" - items: - type: "string" - Image: - description: "The name of the image used when creating this container" - type: "string" - ImageID: - description: "The ID of the image that this container was created from" - type: "string" - Command: - description: "Command to run when starting the container" + Image: + description: "The name of the image used when creating this container" + type: "string" + ImageID: + description: "The ID of the image that this container was created from" + type: "string" + Command: + description: "Command to run when starting the container" + type: "string" + Created: + description: "When the container was created" + type: "integer" + format: "int64" + Ports: + description: "The ports exposed by this container" + type: "array" + items: + $ref: "#/definitions/Port" + SizeRw: + description: "The size of files that have been created or changed by this container" + type: "integer" + format: "int64" + SizeRootFs: + description: "The total size of all the files in this container" + type: "integer" + format: "int64" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: type: "string" - Created: - description: "When the container was created" - type: "integer" - format: "int64" - Ports: - description: "The ports exposed by this container" - type: "array" - items: - $ref: "#/definitions/Port" - SizeRw: - description: "The size of files that have been created or changed by this container" - type: "integer" - format: "int64" - SizeRootFs: - description: "The total size of all the files in this container" - type: "integer" - format: "int64" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: + State: + description: "The state of this container (e.g. `Exited`)" + type: "string" + Status: + description: "Additional human-readable status of this container (e.g. `Exit 0`)" + type: "string" + HostConfig: + type: "object" + properties: + NetworkMode: type: "string" - State: - description: "The state of this container (e.g. `Exited`)" - type: "string" - Status: - description: "Additional human-readable status of this container (e.g. `Exit 0`)" - type: "string" - HostConfig: - type: "object" - properties: - NetworkMode: - type: "string" - NetworkSettings: - description: "A summary of the container's network settings" - type: "object" - properties: - Networks: - type: "object" - additionalProperties: - $ref: "#/definitions/EndpointSettings" - Mounts: - type: "array" - items: - $ref: "#/definitions/Mount" + NetworkSettings: + description: "A summary of the container's network settings" + type: "object" + properties: + Networks: + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + Mounts: + type: "array" + items: + $ref: "#/definitions/MountPoint" Driver: description: "Driver represents a driver (network, logging, secrets)." 
@@ -4240,6 +4619,7 @@ definitions: ContainerState stores container's running state. It's part of ContainerJSONBase and will be returned by the "inspect" command. type: "object" + x-nullable: true properties: Status: description: | @@ -4297,9 +4677,52 @@ definitions: type: "string" example: "2020-01-06T09:07:59.461876391Z" Health: - x-nullable: true $ref: "#/definitions/Health" + ContainerCreateResponse: + description: "OK response to ContainerCreate operation" + type: "object" + title: "ContainerCreateResponse" + x-go-name: "CreateResponse" + required: [Id, Warnings] + properties: + Id: + description: "The ID of the created container" + type: "string" + x-nullable: false + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Warnings: + description: "Warnings encountered when creating the container" + type: "array" + x-nullable: false + items: + type: "string" + example: [] + + ContainerWaitResponse: + description: "OK response to ContainerWait operation" + type: "object" + x-go-name: "WaitResponse" + title: "ContainerWaitResponse" + required: [StatusCode] + properties: + StatusCode: + description: "Exit code of the container" + type: "integer" + format: "int64" + x-nullable: false + Error: + $ref: "#/definitions/ContainerWaitExitError" + + ContainerWaitExitError: + description: "container waiting error, if any" + type: "object" + x-go-name: "WaitExitError" + properties: + Message: + description: "Details of an error" + type: "string" + SystemVersion: type: "object" description: | @@ -4396,7 +4819,6 @@ definitions: type: "string" example: "2020-06-22T15:49:27.000000000+00:00" - SystemInfo: type: "object" properties: @@ -4481,14 +4903,13 @@ definitions: description: "Indicates if the host has memory swap limit support enabled." type: "boolean" example: true - KernelMemory: + KernelMemoryTCP: description: | - Indicates if the host has kernel memory limit support enabled. - -
+ Indicates if the host has kernel memory TCP limit support enabled. This + field is omitted if not supported. - > **Deprecated**: This field is deprecated as the kernel 5.4 deprecated - > `kmem.limit_in_bytes`. + Kernel memory TCP limits are not supported when using cgroups v2, which + does not support the corresponding `memory.kmem.tcp.limit_in_bytes` cgroup. type: "boolean" example: true CpuCfsPeriod: @@ -5198,6 +5619,7 @@ definitions: PeerNode: description: "Represents a peer-node in the swarm" + type: "object" properties: NodeID: description: "Unique identifier of for this node in the swarm." @@ -5229,6 +5651,394 @@ definitions: additionalProperties: type: "string" + EventActor: + description: | + Actor describes something that generates events, like a container, network, + or a volume. + type: "object" + properties: + ID: + description: "The ID of the object emitting the event" + type: "string" + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Attributes: + description: | + Various key/value attributes of the object, depending on its type. + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-label-value" + image: "alpine:latest" + name: "my-container" + + EventMessage: + description: | + EventMessage represents the information an event contains. + type: "object" + title: "SystemEventsResponse" + properties: + Type: + description: "The type of object emitting the event" + type: "string" + enum: ["builder", "config", "container", "daemon", "image", "network", "node", "plugin", "secret", "service", "volume"] + example: "container" + Action: + description: "The type of event" + type: "string" + example: "create" + Actor: + $ref: "#/definitions/EventActor" + scope: + description: | + Scope of the event. Engine events are `local` scope. Cluster (Swarm) + events are `swarm` scope. + type: "string" + enum: ["local", "swarm"] + time: + description: "Timestamp of event" + type: "integer" + format: "int64" + example: 1629574695 + timeNano: + description: "Timestamp of event, with nanosecond accuracy" + type: "integer" + format: "int64" + example: 1629574695515050031 + + OCIDescriptor: + type: "object" + x-go-name: Descriptor + description: | + A descriptor struct containing digest, media type, and size, as defined in + the [OCI Content Descriptors Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/descriptor.md). + properties: + mediaType: + description: | + The media type of the object this schema refers to. + type: "string" + example: "application/vnd.docker.distribution.manifest.v2+json" + digest: + description: | + The digest of the targeted content. + type: "string" + example: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" + size: + description: | + The size in bytes of the blob. + type: "integer" + format: "int64" + example: 3987495 + # TODO Not yet including these fields for now, as they are nil / omitted in our response. + # urls: + # description: | + # List of URLs from which this object MAY be downloaded. + # type: "array" + # items: + # type: "string" + # format: "uri" + # annotations: + # description: | + # Arbitrary metadata relating to the targeted content. 
+ # type: "object" + # additionalProperties: + # type: "string" + # platform: + # $ref: "#/definitions/OCIPlatform" + + OCIPlatform: + type: "object" + x-go-name: Platform + description: | + Describes the platform which the image in the manifest runs on, as defined + in the [OCI Image Index Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/image-index.md). + properties: + architecture: + description: | + The CPU architecture, for example `amd64` or `ppc64`. + type: "string" + example: "arm" + os: + description: | + The operating system, for example `linux` or `windows`. + type: "string" + example: "windows" + os.version: + description: | + Optional field specifying the operating system version, for example on + Windows `10.0.19041.1165`. + type: "string" + example: "10.0.19041.1165" + os.features: + description: | + Optional field specifying an array of strings, each listing a required + OS feature (for example on Windows `win32k`). + type: "array" + items: + type: "string" + example: + - "win32k" + variant: + description: | + Optional field specifying a variant of the CPU, for example `v7` to + specify ARMv7 when architecture is `arm`. + type: "string" + example: "v7" + + DistributionInspect: + type: "object" + x-go-name: DistributionInspect + title: "DistributionInspectResponse" + required: [Descriptor, Platforms] + description: | + Describes the result obtained from contacting the registry to retrieve + image metadata. + properties: + Descriptor: + $ref: "#/definitions/OCIDescriptor" + Platforms: + type: "array" + description: | + An array containing all platforms supported by the image. + items: + $ref: "#/definitions/OCIPlatform" + + ClusterVolume: + type: "object" + description: | + Options and information specific to, and only present on, Swarm CSI + cluster volumes. + properties: + ID: + type: "string" + description: | + The Swarm ID of this volume. Because cluster volumes are Swarm + objects, they have an ID, unlike non-cluster volumes. This ID can + be used to refer to the Volume instead of the name. + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ClusterVolumeSpec" + Info: + type: "object" + description: | + Information about the global status of the volume. + properties: + CapacityBytes: + type: "integer" + format: "int64" + description: | + The capacity of the volume in bytes. A value of 0 indicates that + the capacity is unknown. + VolumeContext: + type: "object" + description: | + A map of strings to strings returned from the storage plugin when + the volume is created. + additionalProperties: + type: "string" + VolumeID: + type: "string" + description: | + The ID of the volume as returned by the CSI storage plugin. This + is distinct from the volume's ID as provided by Docker. This ID + is never used by the user when communicating with Docker to refer + to this volume. If the ID is blank, then the Volume has not been + successfully created in the plugin yet. + AccessibleTopology: + type: "array" + description: | + The topology this volume is actually accessible from. + items: + $ref: "#/definitions/Topology" + PublishStatus: + type: "array" + description: | + The status of the volume as it pertains to its publishing and use on + specific nodes + items: + type: "object" + properties: + NodeID: + type: "string" + description: | + The ID of the Swarm node the volume is published on. 
+ State: + type: "string" + description: | + The published state of the volume. + * `pending-publish` The volume should be published to this node, but the call to the controller plugin to do so has not yet been successfully completed. + * `published` The volume is published successfully to the node. + * `pending-node-unpublish` The volume should be unpublished from the node, and the manager is awaiting confirmation from the worker that it has done so. + * `pending-controller-unpublish` The volume is successfully unpublished from the node, but has not yet been successfully unpublished on the controller. + enum: + - "pending-publish" + - "published" + - "pending-node-unpublish" + - "pending-controller-unpublish" + PublishContext: + type: "object" + description: | + A map of strings to strings returned by the CSI controller + plugin when a volume is published. + additionalProperties: + type: "string" + + ClusterVolumeSpec: + type: "object" + description: | + Cluster-specific options used to create the volume. + properties: + Group: + type: "string" + description: | + Group defines the volume group of this volume. Volumes belonging to + the same group can be referred to by group name when creating + Services. Referring to a volume by group instructs Swarm to treat + volumes in that group interchangeably for the purpose of scheduling. + Volumes with an empty string for a group technically all belong to + the same, emptystring group. + AccessMode: + type: "object" + description: | + Defines how the volume is used by tasks. + properties: + Scope: + type: "string" + description: | + The set of nodes this volume can be used on at one time. + - `single` The volume may only be scheduled to one node at a time. + - `multi` the volume may be scheduled to any supported number of nodes at a time. + default: "single" + enum: ["single", "multi"] + x-nullable: false + Sharing: + type: "string" + description: | + The number and way that different tasks can use this volume + at one time. + - `none` The volume may only be used by one task at a time. + - `readonly` The volume may be used by any number of tasks, but they all must mount the volume as readonly + - `onewriter` The volume may be used by any number of tasks, but only one may mount it as read/write. + - `all` The volume may have any number of readers and writers. + default: "none" + enum: ["none", "readonly", "onewriter", "all"] + x-nullable: false + MountVolume: + type: "object" + description: | + Options for using this volume as a Mount-type volume. + + Either MountVolume or BlockVolume, but not both, must be + present. + properties: + FsType: + type: "string" + description: | + Specifies the filesystem type for the mount volume. + Optional. + MountFlags: + type: "array" + description: | + Flags to pass when mounting the volume. Optional. + items: + type: "string" + BlockVolume: + type: "object" + description: | + Options for using this volume as a Block-type volume. + Intentionally empty. + Secrets: + type: "array" + description: | + Swarm Secrets that are passed to the CSI storage plugin when + operating on this volume. + items: + type: "object" + description: | + One cluster volume secret entry. Defines a key-value pair that + is passed to the plugin. + properties: + Key: + type: "string" + description: | + Key is the name of the key of the key-value pair passed to + the plugin. + Secret: + type: "string" + description: | + Secret is the swarm Secret object from which to read data. + This can be a Secret name or ID. 
The Secret data is + retrieved by swarm and used as the value of the key-value + pair passed to the plugin. + AccessibilityRequirements: + type: "object" + description: | + Requirements for the accessible topology of the volume. These + fields are optional. For an in-depth description of what these + fields mean, see the CSI specification. + properties: + Requisite: + type: "array" + description: | + A list of required topologies, at least one of which the + volume must be accessible from. + items: + $ref: "#/definitions/Topology" + Preferred: + type: "array" + description: | + A list of topologies that the volume should attempt to be + provisioned in. + items: + $ref: "#/definitions/Topology" + CapacityRange: + type: "object" + description: | + The desired capacity that the volume should be created with. If + empty, the plugin will decide the capacity. + properties: + RequiredBytes: + type: "integer" + format: "int64" + description: | + The volume must be at least this big. The value of 0 + indicates an unspecified minimum + LimitBytes: + type: "integer" + format: "int64" + description: | + The volume must not be bigger than this. The value of 0 + indicates an unspecified maximum. + Availability: + type: "string" + description: | + The availability of the volume for use in tasks. + - `active` The volume is fully available for scheduling on the cluster + - `pause` No new workloads should use the volume, but existing workloads are not stopped. + - `drain` All workloads using this volume should be stopped and rescheduled, and no new ones should be started. + default: "active" + x-nullable: false + enum: + - "active" + - "pause" + - "drain" + + Topology: + description: | + A map of topological domains to topological segments. For in depth + details, see documentation for the Topology object in the CSI + specification. + type: "object" + additionalProperties: + type: "string" + paths: /containers/json: get: @@ -5291,7 +6101,9 @@ paths: 200: description: "no error" schema: - $ref: "#/definitions/ContainerSummary" + type: "array" + items: + $ref: "#/definitions/ContainerSummary" examples: application/json: - Id: "8dfafdbc3a40" @@ -5517,7 +6329,6 @@ paths: Memory: 0 MemorySwap: 0 MemoryReservation: 0 - KernelMemory: 0 NanoCpus: 500000 CpuPercent: 80 CpuShares: 512 @@ -5611,25 +6422,7 @@ paths: 201: description: "Container created successfully" schema: - type: "object" - title: "ContainerCreateResponse" - description: "OK response to ContainerCreate operation" - required: [Id, Warnings] - properties: - Id: - description: "The ID of the created container" - type: "string" - x-nullable: false - Warnings: - description: "Warnings encountered when creating the container" - type: "array" - x-nullable: false - items: - type: "string" - examples: - application/json: - Id: "e90e34656806" - Warnings: [] + $ref: "#/definitions/ContainerCreateResponse" 400: description: "bad parameter" schema: @@ -5679,7 +6472,6 @@ paths: items: type: "string" State: - x-nullable: true $ref: "#/definitions/ContainerState" Image: description: "The container's image ID" @@ -5810,7 +6602,6 @@ paths: Memory: 0 MemorySwap: 0 MemoryReservation: 0 - KernelMemory: 0 OomKillDisable: false OomScoreAdj: 500 NetworkMode: "bridge" @@ -6008,6 +6799,9 @@ paths: Note: This endpoint works only for containers with the `json-file` or `journald` logging driver. 
+ produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" operationId: "ContainerLogs" responses: 200: @@ -6421,6 +7215,11 @@ paths: required: true description: "ID or name of the container" type: "string" + - name: "signal" + in: "query" + description: | + Signal to send to the container as an integer or string (e.g. `SIGINT`). + type: "string" - name: "t" in: "query" description: "Number of seconds to wait before killing the container" @@ -6450,6 +7249,11 @@ paths: required: true description: "ID or name of the container" type: "string" + - name: "signal" + in: "query" + description: | + Signal to send to the container as an integer or string (e.g. `SIGINT`). + type: "string" - name: "t" in: "query" description: "Number of seconds to wait before killing the container" @@ -6491,7 +7295,8 @@ paths: type: "string" - name: "signal" in: "query" - description: "Signal to send to the container as an integer or string (e.g. `SIGINT`)" + description: | + Signal to send to the container as an integer or string (e.g. `SIGINT`). type: "string" default: "SIGKILL" tags: ["Container"] @@ -6555,7 +7360,6 @@ paths: Memory: 314572800 MemorySwap: 514288000 MemoryReservation: 209715200 - KernelMemory: 52428800 RestartPolicy: MaximumRetryCount: 4 Name: "on-failure" @@ -6709,7 +7513,8 @@ paths: ### Stream format When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), - the stream over the hijacked connected is multiplexed to separate out + the HTTP Content-Type header is set to application/vnd.docker.multiplexed-stream + and the stream over the hijacked connected is multiplexed to separate out `stdout` and `stderr`. The stream consists of a series of frames, each containing a header and a payload. @@ -6753,6 +7558,7 @@ paths: operationId: "ContainerAttach" produces: - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" responses: 101: description: "no error, hints proxy about hijacking" @@ -6894,22 +7700,11 @@ paths: 200: description: "The container has exit." schema: - type: "object" - title: "ContainerWaitResponse" - description: "OK response to ContainerWait operation" - required: [StatusCode] - properties: - StatusCode: - description: "Exit code of the container" - type: "integer" - x-nullable: false - Error: - description: "container waiting error, if any" - type: "object" - properties: - Message: - description: "Details of an error" - type: "string" + $ref: "#/definitions/ContainerWaitResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" 404: description: "no such container" schema: @@ -6930,9 +7725,14 @@ paths: - name: "condition" in: "query" description: | - Wait until a container state reaches the given condition, either - 'not-running' (default), 'next-exit', or 'removed'. + Wait until a container state reaches the given condition. + + Defaults to `not-running` if omitted or empty. type: "string" + enum: + - "not-running" + - "next-exit" + - "removed" default: "not-running" tags: ["Container"] /containers/{id}: @@ -7004,21 +7804,11 @@ paths: type: "string" description: | A base64 - encoded JSON object with some filesystem header - information about the path - 400: - description: "Bad parameter" - schema: - allOf: - - $ref: "#/definitions/ErrorResponse" - - type: "object" - properties: - message: - description: | - The error message. 
Either "must specify path parameter" - (path cannot be empty) or "not a directory" (path was - asserted to be a directory but exists as a file). - type: "string" - x-nullable: false + information about the path + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" 404: description: "Container or path does not exist" schema: @@ -7053,17 +7843,7 @@ paths: 400: description: "Bad parameter" schema: - allOf: - - $ref: "#/definitions/ErrorResponse" - - type: "object" - properties: - message: - description: | - The error message. Either "must specify path parameter" - (path cannot be empty) or "not a directory" (path was - asserted to be a directory but exists as a file). - type: "string" - x-nullable: false + $ref: "#/definitions/ErrorResponse" 404: description: "Container or path does not exist" schema: @@ -7089,7 +7869,10 @@ paths: tags: ["Container"] put: summary: "Extract an archive of files or folders to a directory in a container" - description: "Upload a tar archive to be extracted to a path in the filesystem of container id." + description: | + Upload a tar archive to be extracted to a path in the filesystem of container id. + `path` parameter is asserted to be a directory. If it exists as a file, 400 error + will be returned with message "not a directory". operationId: "PutContainerArchive" consumes: ["application/x-tar", "application/octet-stream"] responses: @@ -7099,6 +7882,9 @@ paths: description: "Bad parameter" schema: $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "not a directory" 403: description: "Permission denied, the volume or container rootfs is marked as read-only." schema: @@ -7200,35 +7986,6 @@ paths: type: "array" items: $ref: "#/definitions/ImageSummary" - examples: - application/json: - - Id: "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" - ParentId: "" - RepoTags: - - "ubuntu:12.04" - - "ubuntu:precise" - RepoDigests: - - "ubuntu@sha256:992069aee4016783df6345315302fa59681aae51a8eeb2f889dea59290f21787" - Created: 1474925151 - Size: 103579269 - VirtualSize: 103579269 - SharedSize: 0 - Labels: {} - Containers: 2 - - Id: "sha256:3e314f95dcace0f5e4fd37b10862fe8398e3c60ed36600bc0ca5fda78b087175" - ParentId: "" - RepoTags: - - "ubuntu:12.10" - - "ubuntu:quantal" - RepoDigests: - - "ubuntu@sha256:002fba3e3255af10be97ea26e476692a7ebed0bb074a9ab960b2e7a1526b15d7" - - "ubuntu@sha256:68ea0200f0b90df725d99d823905b04cf844f6039ef60c60bf3e019915017bd3" - Created: 1403128455 - Size: 172064416 - VirtualSize: 172064416 - SharedSize: 0 - Labels: {} - Containers: 5 500: description: "server error" schema: @@ -7253,6 +8010,11 @@ paths: - `reference`=(`[:]`) - `since`=(`[:]`, `` or ``) type: "string" + - name: "shared-size" + in: "query" + description: "Compute and show shared size as a `SharedSize` field on each image." + type: "boolean" + default: false - name: "digests" in: "query" description: "Show digest information as a `RepoDigests` field on each image." @@ -7551,9 +8313,36 @@ paths: Refer to the [authentication section](#section/Authentication) for details. type: "string" + - name: "changes" + in: "query" + description: | + Apply `Dockerfile` instructions to the image that is created, + for example: `changes=ENV DEBUG=true`. + Note that `ENV DEBUG=true` should be URI component encoded. 
+ + Supported `Dockerfile` instructions: + `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` + type: "array" + items: + type: "string" - name: "platform" in: "query" - description: "Platform in the format os[/arch[/variant]]" + description: | + Platform in the format os[/arch[/variant]]. + + When used in combination with the `fromImage` option, the daemon checks + if the given image is present in the local image cache with the given + OS and Architecture, and otherwise attempts to pull the image. If the + option is not set, the host's native OS and Architecture are used. + If the given image does not exist in the local image cache, the daemon + attempts to pull the image with the host's native OS and Architecture. + If the given image does exists in the local image cache, but its OS or + architecture does not match, a warning is produced. + + When used with the `fromSrc` option to import an image from an archive, + this option sets the platform information for the imported image. If + the option is not set, the host's native OS and Architecture are used + for the imported image. type: "string" default: "" tags: ["Image"] @@ -7568,84 +8357,7 @@ paths: 200: description: "No error" schema: - $ref: "#/definitions/Image" - examples: - application/json: - Id: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" - Container: "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a" - Comment: "" - Os: "linux" - Architecture: "amd64" - Parent: "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" - ContainerConfig: - Tty: false - Hostname: "e611e15f9c9d" - Domainname: "" - AttachStdout: false - PublishService: "" - AttachStdin: false - OpenStdin: false - StdinOnce: false - NetworkDisabled: false - OnBuild: [] - Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" - User: "" - WorkingDir: "" - MacAddress: "" - AttachStderr: false - Labels: - com.example.license: "GPL" - com.example.version: "1.0" - com.example.vendor: "Acme" - Env: - - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - Cmd: - - "/bin/sh" - - "-c" - - "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" - DockerVersion: "1.9.0-dev" - VirtualSize: 188359297 - Size: 0 - Author: "" - Created: "2015-09-10T08:30:53.26995814Z" - GraphDriver: - Name: "aufs" - Data: {} - RepoDigests: - - "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" - RepoTags: - - "example:1.0" - - "example:latest" - - "example:stable" - Config: - Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" - NetworkDisabled: false - OnBuild: [] - StdinOnce: false - PublishService: "" - AttachStdin: false - OpenStdin: false - Domainname: "" - AttachStdout: false - Tty: false - Hostname: "e611e15f9c9d" - Cmd: - - "/bin/bash" - Env: - - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - Labels: - com.example.vendor: "Acme" - com.example.version: "1.0" - com.example.license: "GPL" - MacAddress: "" - AttachStderr: false - WorkingDir: "" - User: "" - RootFS: - Type: "layers" - Layers: - - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" - - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + $ref: "#/definitions/ImageInspect" 404: description: "No such image" schema: @@ -8015,6 +8727,10 @@ paths: IdentityToken: "9cbaf023786cd7..." 
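
Relating to the `changes` parameter described above for /images/create: the note says instructions such as `ENV DEBUG=true` must be URI component encoded. A minimal sketch of that encoding, not part of the patch, using only the standard library (the image name and path are placeholders):

    package main

    import (
        "fmt"
        "net/url"
    )

    func main() {
        q := url.Values{}
        q.Add("fromImage", "alpine")
        q.Add("changes", "ENV DEBUG=true") // url.Values takes care of the encoding
        // Prints /images/create?changes=ENV+DEBUG%3Dtrue&fromImage=alpine
        fmt.Println("/images/create?" + q.Encode())
    }
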
204: description: "No error" + 401: + description: "Auth error" + schema: + $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: @@ -8076,10 +8792,27 @@ paths: description: "Max API Version the server supports" Builder-Version: type: "string" - description: "Default version of docker image builder" + description: | + Default version of docker image builder + + The default on Linux is version "2" (BuildKit), but the daemon + can be configured to recommend version "1" (classic Builder). + Windows does not yet support BuildKit for native Windows images, + and uses "1" (classic builder) as a default. + + This value is a recommendation as advertised by the daemon, and + it is up to the client to choose which builder to use. + default: "2" Docker-Experimental: type: "boolean" description: "If the server is running with experimental mode enabled" + Swarm: + type: "string" + enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] + description: | + Contains information about Swarm status of the daemon, + and if the daemon is acting as a manager or worker node. + default: "inactive" Cache-Control: type: "string" default: "no-cache, no-store, must-revalidate" @@ -8119,6 +8852,13 @@ paths: Docker-Experimental: type: "boolean" description: "If the server is running with experimental mode enabled" + Swarm: + type: "string" + enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] + description: | + Contains information about Swarm status of the daemon, + and if the daemon is acting as a manager or worker node. + default: "inactive" Cache-Control: type: "string" default: "no-cache, no-store, must-revalidate" @@ -8225,44 +8965,7 @@ paths: 200: description: "no error" schema: - type: "object" - title: "SystemEventsResponse" - properties: - Type: - description: "The type of object emitting the event" - type: "string" - Action: - description: "The type of event" - type: "string" - Actor: - type: "object" - properties: - ID: - description: "The ID of the object emitting the event" - type: "string" - Attributes: - description: "Various key/value attributes of the object, depending on its type" - type: "object" - additionalProperties: - type: "string" - time: - description: "Timestamp of event" - type: "integer" - timeNano: - description: "Timestamp of event, with nanosecond accuracy" - type: "integer" - format: "int64" - examples: - application/json: - Type: "container" - Action: "create" - Actor: - ID: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" - Attributes: - com.example.some-label: "some-label-value" - image: "alpine" - name: "my-container" - time: 1461943101 + $ref: "#/definitions/EventMessage" 400: description: "bad parameter" schema: @@ -8390,10 +9093,43 @@ paths: UsageData: Size: 10920104 RefCount: 2 + BuildCache: + - + ID: "hw53o5aio51xtltp5xjp8v7fx" + Parents: [] + Type: "regular" + Description: "pulled from docker.io/library/debian@sha256:234cb88d3020898631af0ccbbcca9a66ae7306ecd30c9720690858c1b007d2a0" + InUse: false + Shared: true + Size: 0 + CreatedAt: "2021-06-28T13:31:01.474619385Z" + LastUsedAt: "2021-07-07T22:02:32.738075951Z" + UsageCount: 26 + - + ID: "ndlpt0hhvkqcdfkputsk4cq9c" + Parents: ["ndlpt0hhvkqcdfkputsk4cq9c"] + Type: "regular" + Description: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" + InUse: false + Shared: true + Size: 51 + CreatedAt: "2021-06-28T13:31:03.002625487Z" + LastUsedAt: 
"2021-07-07T22:02:32.773909517Z" + UsageCount: 26 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" + parameters: + - name: "type" + in: "query" + description: | + Object types, for which to compute and return data. + type: "array" + collectionFormat: multi + items: + type: "string" + enum: ["container", "image", "volume", "build-cache"] tags: ["System"] /images/{name}/get: get: @@ -8544,6 +9280,7 @@ paths: description: "Exec configuration" schema: type: "object" + title: "ExecConfig" properties: AttachStdin: type: "boolean" @@ -8554,6 +9291,15 @@ paths: AttachStderr: type: "boolean" description: "Attach to `stderr` of the exec command." + ConsoleSize: + type: "array" + description: "Initial console size, as an `[height, width]` array." + x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 DetachKeys: type: "string" description: | @@ -8618,6 +9364,7 @@ paths: - "application/json" produces: - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" responses: 200: description: "No error" @@ -8634,6 +9381,7 @@ paths: in: "body" schema: type: "object" + title: "ExecStartConfig" properties: Detach: type: "boolean" @@ -8641,9 +9389,19 @@ paths: Tty: type: "boolean" description: "Allocate a pseudo-TTY." + ConsoleSize: + type: "array" + description: "Initial console size, as an `[height, width]` array." + x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 example: Detach: false - Tty: false + Tty: true + ConsoleSize: [80, 64] - name: "id" in: "path" description: "Exec instance ID" @@ -8769,41 +9527,7 @@ paths: 200: description: "Summary volume data that matches the query" schema: - type: "object" - title: "VolumeListResponse" - description: "Volume list response" - required: [Volumes, Warnings] - properties: - Volumes: - type: "array" - x-nullable: false - description: "List of volumes" - items: - $ref: "#/definitions/Volume" - Warnings: - type: "array" - x-nullable: false - description: | - Warnings that occurred when fetching the list of volumes. - items: - type: "string" - - examples: - application/json: - Volumes: - - CreatedAt: "2017-07-19T12:00:26Z" - Name: "tardis" - Driver: "local" - Mountpoint: "/var/lib/docker/volumes/tardis" - Labels: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - Scope: "local" - Options: - device: "tmpfs" - o: "size=100m,uid=1000" - type: "tmpfs" - Warnings: [] + $ref: "#/definitions/VolumeListResponse" 500: description: "Server error" schema: @@ -8848,38 +9572,7 @@ paths: required: true description: "Volume configuration" schema: - type: "object" - description: "Volume configuration" - title: "VolumeConfig" - properties: - Name: - description: | - The new volume's name. If not specified, Docker generates a name. - type: "string" - x-nullable: false - Driver: - description: "Name of the volume driver to use." - type: "string" - default: "local" - x-nullable: false - DriverOpts: - description: | - A mapping of driver options and values. These options are - passed directly to the driver and are driver specific. - type: "object" - additionalProperties: - type: "string" - Labels: - description: "User-defined key/value metadata." 
- type: "object" - additionalProperties: - type: "string" - example: - Name: "tardis" - Labels: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - Driver: "custom" + $ref: "#/definitions/VolumeCreateOptions" tags: ["Volume"] /volumes/{name}: @@ -8908,6 +9601,64 @@ paths: type: "string" tags: ["Volume"] + put: + summary: | + "Update a volume. Valid only for Swarm cluster volumes" + operationId: "VolumeUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such volume" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name or ID of the volume" + type: "string" + required: true + - name: "body" + in: "body" + schema: + # though the schema for is an object that contains only a + # ClusterVolumeSpec, wrapping the ClusterVolumeSpec in this object + # means that if, later on, we support things like changing the + # labels, we can do so without duplicating that information to the + # ClusterVolumeSpec. + type: "object" + description: "Volume configuration" + properties: + Spec: + $ref: "#/definitions/ClusterVolumeSpec" + description: | + The spec of the volume to update. Currently, only Availability may + change. All other fields must remain unchanged. + - name: "version" + in: "query" + description: | + The version number of the volume being updated. This is required to + avoid conflicting writes. Found in the volume's `ClusterVolume` + field. + type: "integer" + format: "int64" + required: true + tags: ["Volume"] + delete: summary: "Remove a volume" description: "Instruct the driver to remove the volume." @@ -8939,6 +9690,7 @@ paths: type: "boolean" default: false tags: ["Volume"] + /volumes/prune: post: summary: "Delete unused volumes" @@ -8953,6 +9705,7 @@ paths: Available filters: - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune volumes with (or without, in case `label!=...` is used) the specified labels. + - `all` (`all=true`) - Consider all (local) volumes for pruning and not just anonymous volumes. type: "string" responses: 200: @@ -9176,6 +9929,7 @@ paths: required: true schema: type: "object" + title: "NetworkCreateRequest" required: ["Name"] properties: Name: @@ -9286,6 +10040,7 @@ paths: required: true schema: type: "object" + title: "NetworkConnectRequest" properties: Container: type: "string" @@ -9332,6 +10087,7 @@ paths: required: true schema: type: "object" + title: "NetworkDisconnectRequest" properties: Container: type: "string" @@ -9416,20 +10172,7 @@ paths: schema: type: "array" items: - description: | - Describes a permission the user has to accept upon installing - the plugin. - type: "object" - title: "PluginPrivilegeItem" - properties: - Name: - type: "string" - Description: - type: "string" - Value: - type: "array" - items: - type: "string" + $ref: "#/definitions/PluginPrivilege" example: - Name: "network" Description: "" @@ -9505,19 +10248,7 @@ paths: schema: type: "array" items: - description: | - Describes a permission accepted by the user upon installing the - plugin. 
- type: "object" - properties: - Name: - type: "string" - Description: - type: "string" - Value: - type: "array" - items: - type: "string" + $ref: "#/definitions/PluginPrivilege" example: - Name: "network" Description: "" @@ -9689,19 +10420,7 @@ paths: schema: type: "array" items: - description: | - Describes a permission accepted by the user upon installing the - plugin. - type: "object" - properties: - Name: - type: "string" - Description: - type: "string" - Value: - type: "array" - items: - type: "string" + $ref: "#/definitions/PluginPrivilege" example: - Name: "network" Description: "" @@ -9991,6 +10710,7 @@ paths: required: true schema: type: "object" + title: "SwarmInitRequest" properties: ListenAddr: description: | @@ -10089,6 +10809,7 @@ paths: required: true schema: type: "object" + title: "SwarmJoinRequest" properties: ListenAddr: description: | @@ -10109,7 +10830,7 @@ paths: description: | Address or interface to use for data path traffic (format: ``), for example, `192.168.1.1`, or an interface, - like `eth0`. If `DataPathAddr` is unspecified, the same addres + like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr` is used. The `DataPathAddr` specifies the address that global scope @@ -10249,6 +10970,7 @@ paths: required: true schema: type: "object" + title: "SwarmUnlockRequest" properties: UnlockKey: description: "The swarm's unlock key." @@ -10611,6 +11333,9 @@ paths: **Note**: This endpoint works only for services with the `local`, `json-file` or `journald` logging drivers. + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" operationId: "ServiceLogs" responses: 200: @@ -10866,6 +11591,9 @@ paths: **Note**: This endpoint works only for services with the `local`, `json-file` or `journald` logging drivers. operationId: "TaskLogs" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" responses: 200: description: "logs returned as a stream in response body" @@ -11360,67 +12088,7 @@ paths: 200: description: "descriptor and platform information" schema: - type: "object" - x-go-name: DistributionInspect - title: "DistributionInspectResponse" - required: [Descriptor, Platforms] - properties: - Descriptor: - type: "object" - description: | - A descriptor struct containing digest, media type, and size. - properties: - MediaType: - type: "string" - Size: - type: "integer" - format: "int64" - Digest: - type: "string" - URLs: - type: "array" - items: - type: "string" - Platforms: - type: "array" - description: | - An array containing all platforms supported by the image. 
- items: - type: "object" - properties: - Architecture: - type: "string" - OS: - type: "string" - OSVersion: - type: "string" - OSFeatures: - type: "array" - items: - type: "string" - Variant: - type: "string" - Features: - type: "array" - items: - type: "string" - examples: - application/json: - Descriptor: - MediaType: "application/vnd.docker.distribution.manifest.v2+json" - Digest: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" - Size: 3987495 - URLs: - - "" - Platforms: - - Architecture: "amd64" - OS: "linux" - OSVersion: "" - OSFeatures: - - "" - Variant: "" - Features: - - "" + $ref: "#/definitions/DistributionInspect" 401: description: "Failed authentication or no image found" schema: diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go index 9c464b73e2..97aca02306 100644 --- a/vendor/github.com/docker/docker/api/types/client.go +++ b/vendor/github.com/docker/docker/api/types/client.go @@ -59,7 +59,6 @@ type ContainerExecInspect struct { // ContainerListOptions holds parameters to list containers with. type ContainerListOptions struct { - Quiet bool Size bool All bool Latest bool @@ -113,10 +112,16 @@ type NetworkListOptions struct { Filters filters.Args } +// NewHijackedResponse intializes a HijackedResponse type +func NewHijackedResponse(conn net.Conn, mediaType string) HijackedResponse { + return HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn), mediaType: mediaType} +} + // HijackedResponse holds connection information for a hijacked request. type HijackedResponse struct { - Conn net.Conn - Reader *bufio.Reader + mediaType string + Conn net.Conn + Reader *bufio.Reader } // Close closes the hijacked connection and reader. @@ -124,6 +129,15 @@ func (h *HijackedResponse) Close() { h.Conn.Close() } +// MediaType let client know if HijackedResponse hold a raw or multiplexed stream. +// returns false if HTTP Content-Type is not relevant, and container must be inspected +func (h *HijackedResponse) MediaType() (string, bool) { + if h.mediaType == "" { + return "", false + } + return h.mediaType, true +} + // CloseWriter is an interface that implements structs // that close input streams to prevent from writing. type CloseWriter interface { @@ -236,10 +250,20 @@ type ImageImportOptions struct { Platform string // Platform is the target platform of the image } -// ImageListOptions holds parameters to filter the list of images with. +// ImageListOptions holds parameters to list images with. type ImageListOptions struct { - All bool + // All controls whether all images in the graph are filtered, or just + // the heads. + All bool + + // Filters is a JSON-encoded set of filter arguments. Filters filters.Args + + // SharedSize indicates whether the shared size of images should be computed. + SharedSize bool + + // ContainerCount indicates whether container count should be computed. + ContainerCount bool } // ImageLoadResponse returns information to the client about a load process. 
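
A short sketch of how a client might consume the new HijackedResponse.MediaType helper together with the application/vnd.docker.multiplexed-stream content type advertised above. This is not part of the patch; it assumes the docker client and stdcopy packages from roughly this vendored version, and "mycontainer" is a placeholder:

    package main

    import (
        "context"
        "io"
        "log"
        "os"

        "github.com/docker/docker/api/types"
        "github.com/docker/docker/client"
        "github.com/docker/docker/pkg/stdcopy"
    )

    func main() {
        cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
        if err != nil {
            log.Fatal(err)
        }
        resp, err := cli.ContainerAttach(context.Background(), "mycontainer",
            types.ContainerAttachOptions{Stream: true, Stdout: true, Stderr: true})
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Close()

        if mt, ok := resp.MediaType(); ok && mt == types.MediaTypeMultiplexedStream {
            // TTY disabled: the stream carries framed stdout/stderr and must be demultiplexed.
            _, err = stdcopy.StdCopy(os.Stdout, os.Stderr, resp.Reader)
        } else {
            // Raw stream (TTY enabled), or an older daemon that does not set the Content-Type.
            _, err = io.Copy(os.Stdout, resp.Reader)
        }
        if err != nil {
            log.Fatal(err)
        }
    }

Falling back to a plain copy when MediaType reports false matches the comment in the diff above: when the header is absent, the caller has to inspect the container to know whether the stream is multiplexed.
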
diff --git a/vendor/github.com/docker/docker/api/types/configs.go b/vendor/github.com/docker/docker/api/types/configs.go index 3dd133a3a5..7689f38b33 100644 --- a/vendor/github.com/docker/docker/api/types/configs.go +++ b/vendor/github.com/docker/docker/api/types/configs.go @@ -33,6 +33,7 @@ type ExecConfig struct { User string // User that will run the command Privileged bool // Is the container in privileged mode Tty bool // Attach standard streams to a tty. + ConsoleSize *[2]uint `json:",omitempty"` // Initial console size [height, width] AttachStdin bool // Attach the standard input, makes possible user interaction AttachStderr bool // Attach the standard error AttachStdout bool // Attach the standard output diff --git a/vendor/github.com/docker/docker/api/types/container/config.go b/vendor/github.com/docker/docker/api/types/container/config.go index f767195b94..077583e66c 100644 --- a/vendor/github.com/docker/docker/api/types/container/config.go +++ b/vendor/github.com/docker/docker/api/types/container/config.go @@ -1,6 +1,7 @@ package container // import "github.com/docker/docker/api/types/container" import ( + "io" "time" "github.com/docker/docker/api/types/strslice" @@ -13,6 +14,24 @@ import ( // Docker interprets it as 3 nanoseconds. const MinimumDuration = 1 * time.Millisecond +// StopOptions holds the options to stop or restart a container. +type StopOptions struct { + // Signal (optional) is the signal to send to the container to (gracefully) + // stop it before forcibly terminating the container with SIGKILL after the + // timeout expires. If not value is set, the default (SIGTERM) is used. + Signal string `json:",omitempty"` + + // Timeout (optional) is the timeout (in seconds) to wait for the container + // to stop gracefully before forcibly terminating it with SIGKILL. + // + // - Use nil to use the default timeout (10 seconds). + // - Use '-1' to wait indefinitely. + // - Use '0' to not wait for the container to exit gracefully, and + // immediately proceeds to forcibly terminating the container. + // - Other positive values are used as timeout (in seconds). + Timeout *int `json:",omitempty"` +} + // HealthConfig holds configuration settings for the HEALTHCHECK feature. type HealthConfig struct { // Test is the test to perform to check that the container is healthy. @@ -34,6 +53,14 @@ type HealthConfig struct { Retries int `json:",omitempty"` } +// ExecStartOptions holds the options to start container's exec. +type ExecStartOptions struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer + ConsoleSize *[2]uint `json:",omitempty"` +} + // Config contains the configuration data about a container. // It should hold only portable information about the container. // Here, "portable" means "independent from the host we are running on". diff --git a/vendor/github.com/docker/docker/api/types/container/container_create.go b/vendor/github.com/docker/docker/api/types/container/container_create.go deleted file mode 100644 index d0c852f84d..0000000000 --- a/vendor/github.com/docker/docker/api/types/container/container_create.go +++ /dev/null @@ -1,20 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. 
-// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// ContainerCreateCreatedBody OK response to ContainerCreate operation -// swagger:model ContainerCreateCreatedBody -type ContainerCreateCreatedBody struct { - - // The ID of the created container - // Required: true - ID string `json:"Id"` - - // Warnings encountered when creating the container - // Required: true - Warnings []string `json:"Warnings"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/container_wait.go b/vendor/github.com/docker/docker/api/types/container/container_wait.go deleted file mode 100644 index 49e05ae669..0000000000 --- a/vendor/github.com/docker/docker/api/types/container/container_wait.go +++ /dev/null @@ -1,28 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// ContainerWaitOKBodyError container waiting error, if any -// swagger:model ContainerWaitOKBodyError -type ContainerWaitOKBodyError struct { - - // Details of an error - Message string `json:"Message,omitempty"` -} - -// ContainerWaitOKBody OK response to ContainerWait operation -// swagger:model ContainerWaitOKBody -type ContainerWaitOKBody struct { - - // error - // Required: true - Error *ContainerWaitOKBodyError `json:"Error"` - - // Exit code of the container - // Required: true - StatusCode int64 `json:"StatusCode"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/create_response.go b/vendor/github.com/docker/docker/api/types/container/create_response.go new file mode 100644 index 0000000000..aa0e7f7d07 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/create_response.go @@ -0,0 +1,19 @@ +package container + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// CreateResponse ContainerCreateResponse +// +// OK response to ContainerCreate operation +// swagger:model CreateResponse +type CreateResponse struct { + + // The ID of the created container + // Required: true + ID string `json:"Id"` + + // Warnings encountered when creating the container + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/deprecated.go b/vendor/github.com/docker/docker/api/types/container/deprecated.go new file mode 100644 index 0000000000..0cb70e3638 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/deprecated.go @@ -0,0 +1,16 @@ +package container // import "github.com/docker/docker/api/types/container" + +// ContainerCreateCreatedBody OK response to ContainerCreate operation +// +// Deprecated: use CreateResponse +type ContainerCreateCreatedBody = CreateResponse + +// ContainerWaitOKBody OK response to ContainerWait operation +// +// Deprecated: use WaitResponse +type ContainerWaitOKBody = WaitResponse + +// ContainerWaitOKBodyError container waiting error, if any +// +// Deprecated: use WaitExitError +type ContainerWaitOKBodyError = WaitExitError diff --git a/vendor/github.com/docker/docker/api/types/container/host_config.go b/vendor/github.com/docker/docker/api/types/container/host_config.go index 2d1cbaa9ab..100f434ce7 100644 --- a/vendor/github.com/docker/docker/api/types/container/host_config.go +++ b/vendor/github.com/docker/docker/api/types/container/host_config.go @@ -13,19 +13,26 @@ import ( // CgroupnsMode represents the cgroup namespace mode of the container type CgroupnsMode string +// cgroup namespace modes for containers +const ( + CgroupnsModeEmpty CgroupnsMode = "" + CgroupnsModePrivate CgroupnsMode = "private" + CgroupnsModeHost CgroupnsMode = "host" +) + // IsPrivate indicates whether the container uses its own private cgroup namespace func (c CgroupnsMode) IsPrivate() bool { - return c == "private" + return c == CgroupnsModePrivate } // IsHost indicates whether the container shares the host's cgroup namespace func (c CgroupnsMode) IsHost() bool { - return c == "host" + return c == CgroupnsModeHost } // IsEmpty indicates whether the container cgroup namespace mode is unset func (c CgroupnsMode) IsEmpty() bool { - return c == "" + return c == CgroupnsModeEmpty } // Valid indicates whether the cgroup namespace mode is valid @@ -37,60 +44,69 @@ func (c CgroupnsMode) Valid() bool { // values are platform specific type Isolation string +// Isolation modes for containers +const ( + IsolationEmpty Isolation = "" // IsolationEmpty is unspecified (same behavior as default) + IsolationDefault Isolation = "default" // IsolationDefault is the default isolation mode on current daemon + IsolationProcess Isolation = "process" // IsolationProcess is process isolation mode + IsolationHyperV Isolation = "hyperv" // IsolationHyperV is HyperV isolation mode +) + // IsDefault indicates the default isolation technology of a container. On Linux this // is the native driver. On Windows, this is a Windows Server Container. 
func (i Isolation) IsDefault() bool { - return strings.ToLower(string(i)) == "default" || string(i) == "" + // TODO consider making isolation-mode strict (case-sensitive) + v := Isolation(strings.ToLower(string(i))) + return v == IsolationDefault || v == IsolationEmpty } // IsHyperV indicates the use of a Hyper-V partition for isolation func (i Isolation) IsHyperV() bool { - return strings.ToLower(string(i)) == "hyperv" + // TODO consider making isolation-mode strict (case-sensitive) + return Isolation(strings.ToLower(string(i))) == IsolationHyperV } // IsProcess indicates the use of process isolation func (i Isolation) IsProcess() bool { - return strings.ToLower(string(i)) == "process" + // TODO consider making isolation-mode strict (case-sensitive) + return Isolation(strings.ToLower(string(i))) == IsolationProcess } -const ( - // IsolationEmpty is unspecified (same behavior as default) - IsolationEmpty = Isolation("") - // IsolationDefault is the default isolation mode on current daemon - IsolationDefault = Isolation("default") - // IsolationProcess is process isolation mode - IsolationProcess = Isolation("process") - // IsolationHyperV is HyperV isolation mode - IsolationHyperV = Isolation("hyperv") -) - // IpcMode represents the container ipc stack. type IpcMode string +// IpcMode constants +const ( + IPCModeNone IpcMode = "none" + IPCModeHost IpcMode = "host" + IPCModeContainer IpcMode = "container" + IPCModePrivate IpcMode = "private" + IPCModeShareable IpcMode = "shareable" +) + // IsPrivate indicates whether the container uses its own private ipc namespace which can not be shared. func (n IpcMode) IsPrivate() bool { - return n == "private" + return n == IPCModePrivate } // IsHost indicates whether the container shares the host's ipc namespace. func (n IpcMode) IsHost() bool { - return n == "host" + return n == IPCModeHost } // IsShareable indicates whether the container's ipc namespace can be shared with another container. func (n IpcMode) IsShareable() bool { - return n == "shareable" + return n == IPCModeShareable } // IsContainer indicates whether the container uses another container's ipc namespace. func (n IpcMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == "container" + return strings.HasPrefix(string(n), string(IPCModeContainer)+":") } // IsNone indicates whether container IpcMode is set to "none". func (n IpcMode) IsNone() bool { - return n == "none" + return n == IPCModeNone } // IsEmpty indicates whether container IpcMode is empty @@ -105,9 +121,8 @@ func (n IpcMode) Valid() bool { // Container returns the name of the container ipc stack is going to be used. 
func (n IpcMode) Container() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 && parts[0] == "container" { - return parts[1] + if n.IsContainer() { + return strings.TrimPrefix(string(n), string(IPCModeContainer)+":") } return "" } @@ -326,7 +341,7 @@ type LogMode string // Available logging modes const ( - LogModeUnset = "" + LogModeUnset LogMode = "" LogModeBlocking LogMode = "blocking" LogModeNonBlock LogMode = "non-blocking" ) @@ -361,14 +376,17 @@ type Resources struct { Devices []DeviceMapping // List of devices to map inside the container DeviceCgroupRules []string // List of rule to be added to the device cgroup DeviceRequests []DeviceRequest // List of device requests for device drivers - KernelMemory int64 // Kernel memory limit (in bytes), Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes - KernelMemoryTCP int64 // Hard limit for kernel TCP buffer memory (in bytes) - MemoryReservation int64 // Memory soft limit (in bytes) - MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap - MemorySwappiness *int64 // Tuning container memory swappiness behaviour - OomKillDisable *bool // Whether to disable OOM Killer or not - PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change. - Ulimits []*units.Ulimit // List of ulimits to be set in the container + + // KernelMemory specifies the kernel memory limit (in bytes) for the container. + // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes. + KernelMemory int64 `json:",omitempty"` + KernelMemoryTCP int64 `json:",omitempty"` // Hard limit for kernel TCP buffer memory (in bytes) + MemoryReservation int64 // Memory soft limit (in bytes) + MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap + MemorySwappiness *int64 // Tuning container memory swappiness behaviour + OomKillDisable *bool // Whether to disable OOM Killer or not + PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change. + Ulimits []*units.Ulimit // List of ulimits to be set in the container // Applicable to Windows CPUCount int64 `json:"CpuCount"` // CPU count @@ -399,6 +417,7 @@ type HostConfig struct { AutoRemove bool // Automatically remove container when it exits VolumeDriver string // Name of the volume driver used to mount volumes VolumesFrom []string // List of volumes to take from other container + ConsoleSize [2]uint // Initial console size (height,width) // Applicable to UNIX platforms CapAdd strslice.StrSlice // List of kernel capabilities to add to the container @@ -427,8 +446,7 @@ type HostConfig struct { Runtime string `json:",omitempty"` // Runtime to use with this container // Applicable to Windows - ConsoleSize [2]uint // Initial console size (height,width) - Isolation Isolation // Isolation technology of the container (e.g. default, hyperv) + Isolation Isolation // Isolation technology of the container (e.g. default, hyperv) // Contains container's resources (cgroups, ulimits) Resources diff --git a/vendor/github.com/docker/docker/api/types/container/wait_exit_error.go b/vendor/github.com/docker/docker/api/types/container/wait_exit_error.go new file mode 100644 index 0000000000..ab56d4eed8 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/wait_exit_error.go @@ -0,0 +1,12 @@ +package container + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// WaitExitError container waiting error, if any +// swagger:model WaitExitError +type WaitExitError struct { + + // Details of an error + Message string `json:"Message,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/wait_response.go b/vendor/github.com/docker/docker/api/types/container/wait_response.go new file mode 100644 index 0000000000..84fc6afddc --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/wait_response.go @@ -0,0 +1,18 @@ +package container + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// WaitResponse ContainerWaitResponse +// +// OK response to ContainerWait operation +// swagger:model WaitResponse +type WaitResponse struct { + + // error + Error *WaitExitError `json:"Error,omitempty"` + + // Exit code of the container + // Required: true + StatusCode int64 `json:"StatusCode"` +} diff --git a/vendor/github.com/docker/docker/api/types/deprecated.go b/vendor/github.com/docker/docker/api/types/deprecated.go new file mode 100644 index 0000000000..216d1df0ff --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/deprecated.go @@ -0,0 +1,14 @@ +package types // import "github.com/docker/docker/api/types" + +import "github.com/docker/docker/api/types/volume" + +// Volume volume +// +// Deprecated: use github.com/docker/docker/api/types/volume.Volume +type Volume = volume.Volume + +// VolumeUsageData Usage details about the volume. This information is used by the +// `GET /system/df` endpoint, and omitted in other endpoints. +// +// Deprecated: use github.com/docker/docker/api/types/volume.UsageData +type VolumeUsageData = volume.UsageData diff --git a/vendor/github.com/docker/docker/api/types/events/events.go b/vendor/github.com/docker/docker/api/types/events/events.go index aa8fba8154..9fe07e26fd 100644 --- a/vendor/github.com/docker/docker/api/types/events/events.go +++ b/vendor/github.com/docker/docker/api/types/events/events.go @@ -1,33 +1,26 @@ package events // import "github.com/docker/docker/api/types/events" +// Type is used for event-types. +type Type = string + +// List of known event types. const ( - // BuilderEventType is the event type that the builder generates - BuilderEventType = "builder" - // ContainerEventType is the event type that containers generate - ContainerEventType = "container" - // DaemonEventType is the event type that daemon generate - DaemonEventType = "daemon" - // ImageEventType is the event type that images generate - ImageEventType = "image" - // NetworkEventType is the event type that networks generate - NetworkEventType = "network" - // PluginEventType is the event type that plugins generate - PluginEventType = "plugin" - // VolumeEventType is the event type that volumes generate - VolumeEventType = "volume" - // ServiceEventType is the event type that services generate - ServiceEventType = "service" - // NodeEventType is the event type that nodes generate - NodeEventType = "node" - // SecretEventType is the event type that secrets generate - SecretEventType = "secret" - // ConfigEventType is the event type that configs generate - ConfigEventType = "config" + BuilderEventType Type = "builder" // BuilderEventType is the event type that the builder generates. 
+ ConfigEventType Type = "config" // ConfigEventType is the event type that configs generate. + ContainerEventType Type = "container" // ContainerEventType is the event type that containers generate. + DaemonEventType Type = "daemon" // DaemonEventType is the event type that daemon generate. + ImageEventType Type = "image" // ImageEventType is the event type that images generate. + NetworkEventType Type = "network" // NetworkEventType is the event type that networks generate. + NodeEventType Type = "node" // NodeEventType is the event type that nodes generate. + PluginEventType Type = "plugin" // PluginEventType is the event type that plugins generate. + SecretEventType Type = "secret" // SecretEventType is the event type that secrets generate. + ServiceEventType Type = "service" // ServiceEventType is the event type that services generate. + VolumeEventType Type = "volume" // VolumeEventType is the event type that volumes generate. ) // Actor describes something that generates events, // like a container, or a network, or a volume. -// It has a defined name and a set or attributes. +// It has a defined name and a set of attributes. // The container attributes are its labels, other actors // can generate these attributes from other properties. type Actor struct { @@ -39,11 +32,11 @@ type Actor struct { type Message struct { // Deprecated information from JSONMessage. // With data only in container events. - Status string `json:"status,omitempty"` - ID string `json:"id,omitempty"` - From string `json:"from,omitempty"` + Status string `json:"status,omitempty"` // Deprecated: use Action instead. + ID string `json:"id,omitempty"` // Deprecated: use Actor.ID instead. + From string `json:"from,omitempty"` // Deprecated: use Actor.Attributes["image"] instead. - Type string + Type Type Action string Actor Actor // Engine events are local scope. Cluster events are swarm scope. diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go index 63db4c617b..f8fe794074 100644 --- a/vendor/github.com/docker/docker/api/types/filters/parse.go +++ b/vendor/github.com/docker/docker/api/types/filters/parse.go @@ -1,4 +1,5 @@ -/*Package filters provides tools for encoding a mapping of keys to a set of +/* +Package filters provides tools for encoding a mapping of keys to a set of multiple values. */ package filters // import "github.com/docker/docker/api/types/filters" @@ -9,6 +10,7 @@ import ( "strings" "github.com/docker/docker/api/types/versions" + "github.com/pkg/errors" ) // Args stores a mapping of keys to a set of multiple values. 
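
For context on the filters changes in this file: the invalidFilter rework further down wraps the underlying error, but Args.Validate still produces an error naming the offending filter. A small sketch, not part of the patch, using only the exported filters API:

    package main

    import (
        "fmt"

        "github.com/docker/docker/api/types/filters"
    )

    func main() {
        args := filters.NewArgs(
            filters.Arg("label", "app=web"),
            filters.Arg("bogus", "x"),
        )
        // Only "label" is accepted here, so Validate reports the unknown "bogus"
        // filter, e.g. "invalid filter 'bogus'".
        if err := args.Validate(map[string]bool{"label": true}); err != nil {
            fmt.Println(err)
        }
    }
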
@@ -97,7 +99,7 @@ func FromJSON(p string) (Args, error) { // Fallback to parsing arguments in the legacy slice format deprecated := map[string][]string{} if legacyErr := json.Unmarshal(raw, &deprecated); legacyErr != nil { - return args, err + return args, invalidFilter{errors.Wrap(err, "invalid filter")} } args.fields = deprecatedArgs(deprecated) @@ -244,10 +246,10 @@ func (args Args) Contains(field string) bool { return ok } -type invalidFilter string +type invalidFilter struct{ error } func (e invalidFilter) Error() string { - return "Invalid filter '" + string(e) + "'" + return e.error.Error() } func (invalidFilter) InvalidParameter() {} @@ -257,7 +259,7 @@ func (invalidFilter) InvalidParameter() {} func (args Args) Validate(accepted map[string]bool) error { for name := range args.fields { if !accepted[name] { - return invalidFilter(name) + return invalidFilter{errors.New("invalid filter '" + name + "'")} } } return nil diff --git a/vendor/github.com/docker/docker/api/types/graph_driver_data.go b/vendor/github.com/docker/docker/api/types/graph_driver_data.go index 4d9bf1c62c..ce3deb331c 100644 --- a/vendor/github.com/docker/docker/api/types/graph_driver_data.go +++ b/vendor/github.com/docker/docker/api/types/graph_driver_data.go @@ -3,15 +3,21 @@ package types // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command -// GraphDriverData Information about a container's graph driver. +// GraphDriverData Information about the storage driver used to store the container's and +// image's filesystem. +// // swagger:model GraphDriverData type GraphDriverData struct { - // data + // Low-level storage metadata, provided as key/value pairs. + // + // This information is driver-specific, and depends on the storage-driver + // in use, and should be used for informational purposes only. + // // Required: true Data map[string]string `json:"Data"` - // name + // Name of the storage driver. // Required: true Name string `json:"Name"` } diff --git a/vendor/github.com/docker/docker/api/types/image_summary.go b/vendor/github.com/docker/docker/api/types/image_summary.go index e145b3dcfc..90b983a25c 100644 --- a/vendor/github.com/docker/docker/api/types/image_summary.go +++ b/vendor/github.com/docker/docker/api/types/image_summary.go @@ -7,43 +7,91 @@ package types // swagger:model ImageSummary type ImageSummary struct { - // containers + // Number of containers using this image. Includes both stopped and running + // containers. + // + // This size is not calculated by default, and depends on which API endpoint + // is used. `-1` indicates that the value has not been set / calculated. + // // Required: true Containers int64 `json:"Containers"` - // created + // Date and time at which the image was created as a Unix timestamp + // (number of seconds sinds EPOCH). + // // Required: true Created int64 `json:"Created"` - // Id + // ID is the content-addressable ID of an image. + // + // This identifier is a content-addressable digest calculated from the + // image's configuration (which includes the digests of layers used by + // the image). + // + // Note that this digest differs from the `RepoDigests` below, which + // holds digests of image manifests that reference the image. + // // Required: true ID string `json:"Id"` - // labels + // User-defined key/value metadata. // Required: true Labels map[string]string `json:"Labels"` - // parent Id + // ID of the parent image. 
+ // + // Depending on how the image was created, this field may be empty and + // is only set for images that were built/created locally. This field + // is empty if the image was pulled from an image registry. + // // Required: true ParentID string `json:"ParentId"` - // repo digests + // List of content-addressable digests of locally available image manifests + // that the image is referenced from. Multiple manifests can refer to the + // same image. + // + // These digests are usually only available if the image was either pulled + // from a registry, or if the image was pushed to a registry, which is when + // the manifest is generated and its digest calculated. + // // Required: true RepoDigests []string `json:"RepoDigests"` - // repo tags + // List of image names/tags in the local image cache that reference this + // image. + // + // Multiple image tags can refer to the same image, and this list may be + // empty if no tags reference the image, in which case the image is + // "untagged", in which case it can still be referenced by its ID. + // // Required: true RepoTags []string `json:"RepoTags"` - // shared size + // Total size of image layers that are shared between this image and other + // images. + // + // This size is not calculated by default. `-1` indicates that the value + // has not been set / calculated. + // // Required: true SharedSize int64 `json:"SharedSize"` - // size + // Total size of the image including all layers it is composed of. + // // Required: true Size int64 `json:"Size"` - // virtual size + // Total size of the image including all layers it is composed of. + // + // In versions of Docker before v1.10, this field was calculated from + // the image itself and all of its parent images. Docker v1.10 and up + // store images self-contained, and no longer use a parent-chain, making + // this field an equivalent of the Size field. + // + // This field is kept for backward compatibility, but may be removed in + // a future version of the API. + // // Required: true VirtualSize int64 `json:"VirtualSize"` } diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/docker/docker/api/types/mount/mount.go index 443b8d07a9..ac4ce62231 100644 --- a/vendor/github.com/docker/docker/api/types/mount/mount.go +++ b/vendor/github.com/docker/docker/api/types/mount/mount.go @@ -17,6 +17,8 @@ const ( TypeTmpfs Type = "tmpfs" // TypeNamedPipe is the type for mounting Windows named pipes TypeNamedPipe Type = "npipe" + // TypeCluster is the type for Swarm Cluster Volumes. + TypeCluster Type = "cluster" ) // Mount represents a mount (volume). @@ -30,9 +32,10 @@ type Mount struct { ReadOnly bool `json:",omitempty"` Consistency Consistency `json:",omitempty"` - BindOptions *BindOptions `json:",omitempty"` - VolumeOptions *VolumeOptions `json:",omitempty"` - TmpfsOptions *TmpfsOptions `json:",omitempty"` + BindOptions *BindOptions `json:",omitempty"` + VolumeOptions *VolumeOptions `json:",omitempty"` + TmpfsOptions *TmpfsOptions `json:",omitempty"` + ClusterOptions *ClusterOptions `json:",omitempty"` } // Propagation represents the propagation of a mount. @@ -79,8 +82,9 @@ const ( // BindOptions defines options specific to mounts of type "bind". 
type BindOptions struct { - Propagation Propagation `json:",omitempty"` - NonRecursive bool `json:",omitempty"` + Propagation Propagation `json:",omitempty"` + NonRecursive bool `json:",omitempty"` + CreateMountpoint bool `json:",omitempty"` } // VolumeOptions represents the options for a mount of type volume. @@ -129,3 +133,8 @@ type TmpfsOptions struct { // Some of these may be straightforward to add, but others, such as // uid/gid have implications in a clustered system. } + +// ClusterOptions specifies options for a Cluster volume. +type ClusterOptions struct { + // intentionally empty +} diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go index 53e47084c8..62a88f5be8 100644 --- a/vendor/github.com/docker/docker/api/types/registry/registry.go +++ b/vendor/github.com/docker/docker/api/types/registry/registry.go @@ -45,31 +45,32 @@ func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) { // IndexInfo contains information about a registry // // RepositoryInfo Examples: -// { -// "Index" : { -// "Name" : "docker.io", -// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], -// "Secure" : true, -// "Official" : true, -// }, -// "RemoteName" : "library/debian", -// "LocalName" : "debian", -// "CanonicalName" : "docker.io/debian" -// "Official" : true, -// } // -// { -// "Index" : { -// "Name" : "127.0.0.1:5000", -// "Mirrors" : [], -// "Secure" : false, -// "Official" : false, -// }, -// "RemoteName" : "user/repo", -// "LocalName" : "127.0.0.1:5000/user/repo", -// "CanonicalName" : "127.0.0.1:5000/user/repo", -// "Official" : false, -// } +// { +// "Index" : { +// "Name" : "docker.io", +// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], +// "Secure" : true, +// "Official" : true, +// }, +// "RemoteName" : "library/debian", +// "LocalName" : "debian", +// "CanonicalName" : "docker.io/debian" +// "Official" : true, +// } +// +// { +// "Index" : { +// "Name" : "127.0.0.1:5000", +// "Mirrors" : [], +// "Secure" : false, +// "Official" : false, +// }, +// "RemoteName" : "user/repo", +// "LocalName" : "127.0.0.1:5000/user/repo", +// "CanonicalName" : "127.0.0.1:5000/user/repo", +// "Official" : false, +// } type IndexInfo struct { // Name is the name of the registry, such as "docker.io" Name string diff --git a/vendor/github.com/docker/docker/api/types/swarm/common.go b/vendor/github.com/docker/docker/api/types/swarm/common.go index ef020f458b..5ded7dba8a 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/common.go +++ b/vendor/github.com/docker/docker/api/types/swarm/common.go @@ -1,12 +1,20 @@ package swarm // import "github.com/docker/docker/api/types/swarm" -import "time" +import ( + "strconv" + "time" +) // Version represents the internal object version. type Version struct { Index uint64 `json:",omitempty"` } +// String implements fmt.Stringer interface. +func (v Version) String() string { + return strconv.FormatUint(v.Index, 10) +} + // Meta is a base object inherited by most of the other once. 
type Meta struct { Version Version `json:",omitempty"` diff --git a/vendor/github.com/docker/docker/api/types/swarm/node.go b/vendor/github.com/docker/docker/api/types/swarm/node.go index 1e30f5fa10..bb98d5eedc 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/node.go +++ b/vendor/github.com/docker/docker/api/types/swarm/node.go @@ -53,6 +53,7 @@ type NodeDescription struct { Resources Resources `json:",omitempty"` Engine EngineDescription `json:",omitempty"` TLSInfo TLSInfo `json:",omitempty"` + CSIInfo []NodeCSIInfo `json:",omitempty"` } // Platform represents the platform (Arch/OS). @@ -68,6 +69,21 @@ type EngineDescription struct { Plugins []PluginDescription `json:",omitempty"` } +// NodeCSIInfo represents information about a CSI plugin available on the node +type NodeCSIInfo struct { + // PluginName is the name of the CSI plugin. + PluginName string `json:",omitempty"` + // NodeID is the ID of the node as reported by the CSI plugin. This is + // different from the swarm node ID. + NodeID string `json:",omitempty"` + // MaxVolumesPerNode is the maximum number of volumes that may be published + // to this node + MaxVolumesPerNode int64 `json:",omitempty"` + // AccessibleTopology indicates the location of this node in the CSI + // plugin's topology + AccessibleTopology *Topology `json:",omitempty"` +} + // PluginDescription represents the description of an engine plugin. type PluginDescription struct { Type string `json:",omitempty"` @@ -113,3 +129,11 @@ const ( // NodeStateDisconnected DISCONNECTED NodeStateDisconnected NodeState = "disconnected" ) + +// Topology defines the CSI topology of this node. This type is a duplicate of +// github.com/docker/docker/api/types.Topology. Because the type definition +// is so simple and to avoid complicated structure or circular imports, we just +// duplicate it here. See that type for full documentation +type Topology struct { + Segments map[string]string `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go index b25f999646..3eae4b9b29 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/swarm.go +++ b/vendor/github.com/docker/docker/api/types/swarm/swarm.go @@ -213,6 +213,16 @@ type Info struct { Warnings []string `json:",omitempty"` } +// Status provides information about the current swarm status and role, +// obtained from the "Swarm" header in the API response. +type Status struct { + // NodeState represents the state of the node. + NodeState LocalNodeState + + // ControlAvailable indicates if the node is a swarm manager. + ControlAvailable bool +} + // Peer represents a peer. type Peer struct { NodeID string diff --git a/vendor/github.com/docker/docker/api/types/swarm/task.go b/vendor/github.com/docker/docker/api/types/swarm/task.go index a6f7ab7b5c..ad3eeca0b7 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/task.go +++ b/vendor/github.com/docker/docker/api/types/swarm/task.go @@ -62,6 +62,11 @@ type Task struct { // used to determine which Tasks belong to which run of the job. This field // is absent if the Service mode is Replicated or Global. JobIteration *Version `json:",omitempty"` + + // Volumes is the list of VolumeAttachments for this task. It specifies + // which particular volumes are to be used by this particular task, and + // fulfilling what mounts in the spec. 
+ Volumes []VolumeAttachment } // TaskSpec represents the spec of a task. @@ -204,3 +209,17 @@ type ContainerStatus struct { type PortStatus struct { Ports []PortConfig `json:",omitempty"` } + +// VolumeAttachment contains the associating a Volume to a Task. +type VolumeAttachment struct { + // ID is the Swarmkit ID of the Volume. This is not the CSI VolumeId. + ID string `json:",omitempty"` + + // Source, together with Target, indicates the Mount, as specified in the + // ContainerSpec, that this volume fulfills. + Source string `json:",omitempty"` + + // Target, together with Source, indicates the Mount, as specified + // in the ContainerSpec, that this volume fulfills. + Target string `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/time/duration_convert.go b/vendor/github.com/docker/docker/api/types/time/duration_convert.go deleted file mode 100644 index 84b6f07322..0000000000 --- a/vendor/github.com/docker/docker/api/types/time/duration_convert.go +++ /dev/null @@ -1,12 +0,0 @@ -package time // import "github.com/docker/docker/api/types/time" - -import ( - "strconv" - "time" -) - -// DurationToSecondsString converts the specified duration to the number -// seconds it represents, formatted as a string. -func DurationToSecondsString(duration time.Duration) string { - return strconv.FormatFloat(duration.Seconds(), 'f', 0, 64) -} diff --git a/vendor/github.com/docker/docker/api/types/time/timestamp.go b/vendor/github.com/docker/docker/api/types/time/timestamp.go index ea3495efeb..2a74b7a597 100644 --- a/vendor/github.com/docker/docker/api/types/time/timestamp.go +++ b/vendor/github.com/docker/docker/api/types/time/timestamp.go @@ -100,8 +100,10 @@ func GetTimestamp(value string, reference time.Time) (string, error) { // if the incoming nanosecond portion is longer or shorter than 9 digits it is // converted to nanoseconds. The expectation is that the seconds and // seconds will be used to create a time variable. For example: -// seconds, nanoseconds, err := ParseTimestamp("1136073600.000000001",0) -// if err == nil since := time.Unix(seconds, nanoseconds) +// +// seconds, nanoseconds, err := ParseTimestamp("1136073600.000000001",0) +// if err == nil since := time.Unix(seconds, nanoseconds) +// // returns seconds as def(aultSeconds) if value == "" func ParseTimestamps(value string, def int64) (int64, int64, error) { if value == "" { diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go index e3a159912e..036405299e 100644 --- a/vendor/github.com/docker/docker/api/types/types.go +++ b/vendor/github.com/docker/docker/api/types/types.go @@ -14,43 +14,136 @@ import ( "github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/volume" "github.com/docker/go-connections/nat" ) +const ( + // MediaTypeRawStream is vendor specific MIME-Type set for raw TTY streams + MediaTypeRawStream = "application/vnd.docker.raw-stream" + + // MediaTypeMultiplexedStream is vendor specific MIME-Type set for stdin/stdout/stderr multiplexed streams + MediaTypeMultiplexedStream = "application/vnd.docker.multiplexed-stream" +) + // RootFS returns Image's RootFS description including the layer IDs. 
type RootFS struct { - Type string - Layers []string `json:",omitempty"` - BaseLayer string `json:",omitempty"` + Type string `json:",omitempty"` + Layers []string `json:",omitempty"` } // ImageInspect contains response of Engine API: // GET "/images/{name:.*}/json" type ImageInspect struct { - ID string `json:"Id"` - RepoTags []string - RepoDigests []string - Parent string - Comment string - Created string - Container string + // ID is the content-addressable ID of an image. + // + // This identifier is a content-addressable digest calculated from the + // image's configuration (which includes the digests of layers used by + // the image). + // + // Note that this digest differs from the `RepoDigests` below, which + // holds digests of image manifests that reference the image. + ID string `json:"Id"` + + // RepoTags is a list of image names/tags in the local image cache that + // reference this image. + // + // Multiple image tags can refer to the same image, and this list may be + // empty if no tags reference the image, in which case the image is + // "untagged", in which case it can still be referenced by its ID. + RepoTags []string + + // RepoDigests is a list of content-addressable digests of locally available + // image manifests that the image is referenced from. Multiple manifests can + // refer to the same image. + // + // These digests are usually only available if the image was either pulled + // from a registry, or if the image was pushed to a registry, which is when + // the manifest is generated and its digest calculated. + RepoDigests []string + + // Parent is the ID of the parent image. + // + // Depending on how the image was created, this field may be empty and + // is only set for images that were built/created locally. This field + // is empty if the image was pulled from an image registry. + Parent string + + // Comment is an optional message that can be set when committing or + // importing the image. + Comment string + + // Created is the date and time at which the image was created, formatted in + // RFC 3339 nano-seconds (time.RFC3339Nano). + Created string + + // Container is the ID of the container that was used to create the image. + // + // Depending on how the image was created, this field may be empty. + Container string + + // ContainerConfig is an optional field containing the configuration of the + // container that was last committed when creating the image. + // + // Previous versions of Docker builder used this field to store build cache, + // and it is not in active use anymore. ContainerConfig *container.Config - DockerVersion string - Author string - Config *container.Config - Architecture string - Variant string `json:",omitempty"` - Os string - OsVersion string `json:",omitempty"` - Size int64 - VirtualSize int64 - GraphDriver GraphDriverData - RootFS RootFS - Metadata ImageMetadata + + // DockerVersion is the version of Docker that was used to build the image. + // + // Depending on how the image was created, this field may be empty. + DockerVersion string + + // Author is the name of the author that was specified when committing the + // image, or as specified through MAINTAINER (deprecated) in the Dockerfile. + Author string + Config *container.Config + + // Architecture is the hardware CPU architecture that the image runs on. + Architecture string + + // Variant is the CPU architecture variant (presently ARM-only). + Variant string `json:",omitempty"` + + // OS is the Operating System the image is built to run on. 
+ Os string + + // OsVersion is the version of the Operating System the image is built to + // run on (especially for Windows). + OsVersion string `json:",omitempty"` + + // Size is the total size of the image including all layers it is composed of. + Size int64 + + // VirtualSize is the total size of the image including all layers it is + // composed of. + // + // In versions of Docker before v1.10, this field was calculated from + // the image itself and all of its parent images. Docker v1.10 and up + // store images self-contained, and no longer use a parent-chain, making + // this field an equivalent of the Size field. + // + // This field is kept for backward compatibility, but may be removed in + // a future version of the API. + VirtualSize int64 // TODO(thaJeztah): deprecate this field + + // GraphDriver holds information about the storage driver used to store the + // container's and image's filesystem. + GraphDriver GraphDriverData + + // RootFS contains information about the image's RootFS, including the + // layer IDs. + RootFS RootFS + + // Metadata of the image in the local cache. + // + // This information is local to the daemon, and not part of the image itself. + Metadata ImageMetadata } // ImageMetadata contains engine-local data about the image type ImageMetadata struct { + // LastTagTime is the date and time at which the image was last tagged. LastTagTime time.Time `json:",omitempty"` } @@ -107,6 +200,15 @@ type Ping struct { OSType string Experimental bool BuilderVersion BuilderVersion + + // SwarmStatus provides information about the current swarm status of the + // engine, obtained from the "Swarm" header in the API response. + // + // It can be a nil struct if the API version does not provide this header + // in the ping response, or if an error occurred, in which case the client + // should use other ways to get the current swarm status, such as the /swarm + // endpoint. + SwarmStatus *swarm.Status } // ComponentVersion describes the version information for a specific component. @@ -158,8 +260,8 @@ type Info struct { Plugins PluginsInfo MemoryLimit bool SwapLimit bool - KernelMemory bool // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes - KernelMemoryTCP bool + KernelMemory bool `json:",omitempty"` // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes + KernelMemoryTCP bool `json:",omitempty"` // KernelMemoryTCP is not supported on cgroups v2. CPUCfsPeriod bool `json:"CpuCfsPeriod"` CPUCfsQuota bool `json:"CpuCfsQuota"` CPUShares bool @@ -212,7 +314,12 @@ type Info struct { SecurityOptions []string ProductLicense string `json:",omitempty"` DefaultAddressPools []NetworkAddressPool `json:",omitempty"` - Warnings []string + + // Warnings contains a slice of warnings that occurred while collecting + // system information. These warnings are intended to be informational + // messages for the user, and are not intended to be parsed / used for + // other purposes, as they do not have a fixed format. + Warnings []string } // KeyValue holds a key/value pair @@ -283,6 +390,8 @@ type ExecStartCheck struct { Detach bool // Check if there's a tty Tty bool + // Terminal size [height, width], unused if Tty == false + ConsoleSize *[2]uint `json:",omitempty"` } // HealthcheckResult stores information about a single run of a healthcheck probe @@ -416,13 +525,44 @@ type DefaultNetworkSettings struct { // MountPoint represents a mount point configuration inside the container. // This is used for reporting the mountpoints in use by a container. 
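As a rough illustration (not part of the vendored hunks), the new Ping.SwarmStatus field might be consumed like this, assuming a reachable daemon and the canonical github.com/docker/docker import paths:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ping, err := cli.Ping(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	// SwarmStatus is nil when the API version (or an error) did not provide
	// the "Swarm" header; fall back to the /swarm endpoint in that case.
	if ping.SwarmStatus == nil {
		fmt.Println("swarm status not reported by this API version")
		return
	}
	fmt.Printf("node state: %s, manager: %v\n",
		ping.SwarmStatus.NodeState, ping.SwarmStatus.ControlAvailable)
}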
type MountPoint struct { - Type mount.Type `json:",omitempty"` - Name string `json:",omitempty"` - Source string + // Type is the type of mount, see `Type` definitions in + // github.com/docker/docker/api/types/mount.Type + Type mount.Type `json:",omitempty"` + + // Name is the name reference to the underlying data defined by `Source` + // e.g., the volume name. + Name string `json:",omitempty"` + + // Source is the source location of the mount. + // + // For volumes, this contains the storage location of the volume (within + // `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains + // the source (host) part of the bind-mount. For `tmpfs` mount points, this + // field is empty. + Source string + + // Destination is the path relative to the container root (`/`) where the + // Source is mounted inside the container. Destination string - Driver string `json:",omitempty"` - Mode string - RW bool + + // Driver is the volume driver used to create the volume (if it is a volume). + Driver string `json:",omitempty"` + + // Mode is a comma separated list of options supplied by the user when + // creating the bind/volume mount. + // + // The default is platform-specific (`"z"` on Linux, empty on Windows). + Mode string + + // RW indicates whether the mount is mounted writable (read-write). + RW bool + + // Propagation describes how mounts are propagated from the host into the + // mount point, and vice-versa. Refer to the Linux kernel documentation + // for details: + // https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt + // + // This field is not used on Windows. Propagation mount.Propagation } @@ -530,15 +670,36 @@ type ShimConfig struct { Opts interface{} } +// DiskUsageObject represents an object type used for disk usage query filtering. +type DiskUsageObject string + +const ( + // ContainerObject represents a container DiskUsageObject. + ContainerObject DiskUsageObject = "container" + // ImageObject represents an image DiskUsageObject. + ImageObject DiskUsageObject = "image" + // VolumeObject represents a volume DiskUsageObject. + VolumeObject DiskUsageObject = "volume" + // BuildCacheObject represents a build-cache DiskUsageObject. + BuildCacheObject DiskUsageObject = "build-cache" +) + +// DiskUsageOptions holds parameters for system disk usage query. +type DiskUsageOptions struct { + // Types specifies what object types to include in the response. If empty, + // all object types are returned. + Types []DiskUsageObject +} + // DiskUsage contains response of Engine API: // GET "/system/df" type DiskUsage struct { LayersSize int64 Images []*ImageSummary Containers []*Container - Volumes []*Volume + Volumes []*volume.Volume BuildCache []*BuildCache - BuilderSize int64 // deprecated + BuilderSize int64 `json:",omitempty"` // Deprecated: deprecated in API 1.38, and no longer used since API 1.40. } // ContainersPruneReport contains the response for Engine API: @@ -613,18 +774,31 @@ type BuildResult struct { ID string } -// BuildCache contains information about a build cache record +// BuildCache contains information about a build cache record. type BuildCache struct { - ID string - Parent string - Type string + // ID is the unique ID of the build cache record. + ID string + // Parent is the ID of the parent build cache record. + // + // Deprecated: deprecated in API v1.42 and up, as it was deprecated in BuildKit; use Parents instead. + Parent string `json:"Parent,omitempty"` + // Parents is the list of parent build cache record IDs. 
+ Parents []string `json:"Parents,omitempty"`
+ // Type is the cache record type.
+ Type string
+ // Description is a description of the build-step that produced the build cache.
Description string
- InUse bool
- Shared bool
- Size int64
- CreatedAt time.Time
- LastUsedAt *time.Time
- UsageCount int
+ // InUse indicates if the build cache is in use.
+ InUse bool
+ // Shared indicates if the build cache is shared.
+ Shared bool
+ // Size is the amount of disk space used by the build cache (in bytes).
+ Size int64
+ // CreatedAt is the date and time at which the build cache was created.
+ CreatedAt time.Time
+ // LastUsedAt is the date and time at which the build cache was last used.
+ LastUsedAt *time.Time
+ UsageCount int
}

// BuildCachePruneOptions hold parameters to prune the build cache
diff --git a/vendor/github.com/docker/docker/api/types/versions/compare.go b/vendor/github.com/docker/docker/api/types/versions/compare.go
index 8ccb0aa92e..489e917ee5 100644
--- a/vendor/github.com/docker/docker/api/types/versions/compare.go
+++ b/vendor/github.com/docker/docker/api/types/versions/compare.go
@@ -8,6 +8,9 @@ import (
// compare compares two version strings
// returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise.
func compare(v1, v2 string) int {
+ if v1 == v2 {
+ return 0
+ }
var (
currTab = strings.Split(v1, ".")
otherTab = strings.Split(v2, ".")
diff --git a/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go b/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go
new file mode 100644
index 0000000000..55fc5d3899
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go
@@ -0,0 +1,420 @@
+package volume
+
+import (
+ "github.com/docker/docker/api/types/swarm"
+)
+
+// ClusterVolume contains options and information specific to, and only present
+// on, Swarm CSI cluster volumes.
+type ClusterVolume struct {
+ // ID is the Swarm ID of the volume. Because cluster volumes are Swarm
+ // objects, they have an ID, unlike non-cluster volumes, which only have a
+ // Name. This ID can be used to refer to the cluster volume.
+ ID string
+
+ // Meta is the swarm metadata about this volume.
+ swarm.Meta
+
+ // Spec is the cluster-specific options from which this volume is derived.
+ Spec ClusterVolumeSpec
+
+ // PublishStatus contains the status of the volume as it pertains to its
+ // publishing on Nodes.
+ PublishStatus []*PublishStatus `json:",omitempty"`
+
+ // Info is information about the global status of the volume.
+ Info *Info `json:",omitempty"`
+}
+
+// ClusterVolumeSpec contains the spec used to create this volume.
+type ClusterVolumeSpec struct {
+ // Group defines the volume group of this volume. Volumes belonging to the
+ // same group can be referred to by group name when creating Services.
+ // Referring to a volume by group instructs swarm to treat volumes in that
+ // group interchangeably for the purpose of scheduling. Volumes with an
+ // empty string for a group technically all belong to the same, emptystring
+ // group.
+ Group string `json:",omitempty"`
+
+ // AccessMode defines how the volume is used by tasks.
+ AccessMode *AccessMode `json:",omitempty"`
+
+ // AccessibilityRequirements specifies where in the cluster a volume must
+ // be accessible from.
+ //
+ // This field must be empty if the plugin does not support
+ // VOLUME_ACCESSIBILITY_CONSTRAINTS capabilities. If it is present but the
+ // plugin does not support it, volume will not be created.
+ // + // If AccessibilityRequirements is empty, but the plugin does support + // VOLUME_ACCESSIBILITY_CONSTRAINTS, then Swarmkit will assume the entire + // cluster is a valid target for the volume. + AccessibilityRequirements *TopologyRequirement `json:",omitempty"` + + // CapacityRange defines the desired capacity that the volume should be + // created with. If nil, the plugin will decide the capacity. + CapacityRange *CapacityRange `json:",omitempty"` + + // Secrets defines Swarm Secrets that are passed to the CSI storage plugin + // when operating on this volume. + Secrets []Secret `json:",omitempty"` + + // Availability is the Volume's desired availability. Analogous to Node + // Availability, this allows the user to take volumes offline in order to + // update or delete them. + Availability Availability `json:",omitempty"` +} + +// Availability specifies the availability of the volume. +type Availability string + +const ( + // AvailabilityActive indicates that the volume is active and fully + // schedulable on the cluster. + AvailabilityActive Availability = "active" + + // AvailabilityPause indicates that no new workloads should use the + // volume, but existing workloads can continue to use it. + AvailabilityPause Availability = "pause" + + // AvailabilityDrain indicates that all workloads using this volume + // should be rescheduled, and the volume unpublished from all nodes. + AvailabilityDrain Availability = "drain" +) + +// AccessMode defines the access mode of a volume. +type AccessMode struct { + // Scope defines the set of nodes this volume can be used on at one time. + Scope Scope `json:",omitempty"` + + // Sharing defines the number and way that different tasks can use this + // volume at one time. + Sharing SharingMode `json:",omitempty"` + + // MountVolume defines options for using this volume as a Mount-type + // volume. + // + // Either BlockVolume or MountVolume, but not both, must be present. + MountVolume *TypeMount `json:",omitempty"` + + // BlockVolume defines options for using this volume as a Block-type + // volume. + // + // Either BlockVolume or MountVolume, but not both, must be present. + BlockVolume *TypeBlock `json:",omitempty"` +} + +// Scope defines the Scope of a Cluster Volume. This is how many nodes a +// Volume can be accessed simultaneously on. +type Scope string + +const ( + // ScopeSingleNode indicates the volume can be used on one node at a + // time. + ScopeSingleNode Scope = "single" + + // ScopeMultiNode indicates the volume can be used on many nodes at + // the same time. + ScopeMultiNode Scope = "multi" +) + +// SharingMode defines the Sharing of a Cluster Volume. This is how Tasks using a +// Volume at the same time can use it. +type SharingMode string + +const ( + // SharingNone indicates that only one Task may use the Volume at a + // time. + SharingNone SharingMode = "none" + + // SharingReadOnly indicates that the Volume may be shared by any + // number of Tasks, but they must be read-only. + SharingReadOnly SharingMode = "readonly" + + // SharingOneWriter indicates that the Volume may be shared by any + // number of Tasks, but all after the first must be read-only. + SharingOneWriter SharingMode = "onewriter" + + // SharingAll means that the Volume may be shared by any number of + // Tasks, as readers or writers. + SharingAll SharingMode = "all" +) + +// TypeBlock defines options for using a volume as a block-type volume. +// +// Intentionally empty. 
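As an illustrative sketch (not from upstream), the access-mode and availability types above could be combined into a ClusterVolumeSpec as follows; TypeMount is defined immediately below, the field values are placeholders, and whether a given CSI plugin honors these options is plugin-specific. Such a spec would typically be passed to volume creation through the CreateOptions type introduced later in this patch.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/volume"
)

func main() {
	// A single-node, single-writer mount volume: only one task may use it
	// at a time, mounted with an ext4 filesystem.
	spec := volume.ClusterVolumeSpec{
		Group: "database", // placeholder volume group
		AccessMode: &volume.AccessMode{
			Scope:   volume.ScopeSingleNode,
			Sharing: volume.SharingNone,
			// Exactly one of MountVolume or BlockVolume must be set.
			MountVolume: &volume.TypeMount{FsType: "ext4"},
		},
		Availability: volume.AvailabilityActive,
	}
	fmt.Printf("%+v\n", spec)
}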
+type TypeBlock struct{}
+
+// TypeMount contains options for using a volume as a Mount-type
+// volume.
+type TypeMount struct {
+ // FsType specifies the filesystem type for the mount volume. Optional.
+ FsType string `json:",omitempty"`
+
+ // MountFlags defines flags to pass when mounting the volume. Optional.
+ MountFlags []string `json:",omitempty"`
+}
+
+// TopologyRequirement expresses the user's requirements for a volume's
+// accessible topology.
+type TopologyRequirement struct {
+ // Requisite specifies a list of Topologies, at least one of which the
+ // volume must be accessible from.
+ //
+ // Taken verbatim from the CSI Spec:
+ //
+ // Specifies the list of topologies the provisioned volume MUST be
+ // accessible from.
+ // This field is OPTIONAL. If TopologyRequirement is specified either
+ // requisite or preferred or both MUST be specified.
+ //
+ // If requisite is specified, the provisioned volume MUST be
+ // accessible from at least one of the requisite topologies.
+ //
+ // Given
+ // x = number of topologies provisioned volume is accessible from
+ // n = number of requisite topologies
+ // The CO MUST ensure n >= 1. The SP MUST ensure x >= 1
+ // If x==n, then the SP MUST make the provisioned volume available to
+ // all topologies from the list of requisite topologies. If it is
+ // unable to do so, the SP MUST fail the CreateVolume call.
+ // For example, if a volume should be accessible from a single zone,
+ // and requisite =
+ // {"region": "R1", "zone": "Z2"}
+ // then the provisioned volume MUST be accessible from the "region"
+ // "R1" and the "zone" "Z2".
+ // Similarly, if a volume should be accessible from two zones, and
+ // requisite =
+ // {"region": "R1", "zone": "Z2"},
+ // {"region": "R1", "zone": "Z3"}
+ // then the provisioned volume MUST be accessible from the "region"
+ // "R1" and both "zone" "Z2" and "zone" "Z3".
+ //
+ // If x>n, then the SP MUST make the provisioned volume available from
+ // all topologies from the list of requisite topologies and MAY choose
+ // the remaining x-n unique topologies from the list of all possible
+ // topologies. If it is unable to do so, the SP MUST fail the
+ // CreateVolume call.
+ // For example, if a volume should be accessible from two zones, and
+ // requisite =
+ // {"region": "R1", "zone": "Z2"}
+ // then the provisioned volume MUST be accessible from the "region"
+ // "R1" and the "zone" "Z2" and the SP may select the second zone
+ // independently, e.g. "R1/Z4".
+ Requisite []Topology `json:",omitempty"`
+
+ // Preferred is a list of Topologies that the volume should attempt to be
+ // provisioned in.
+ //
+ // Taken from the CSI spec:
+ //
+ // Specifies the list of topologies the CO would prefer the volume to
+ // be provisioned in.
+ //
+ // This field is OPTIONAL. If TopologyRequirement is specified either
+ // requisite or preferred or both MUST be specified.
+ //
+ // An SP MUST attempt to make the provisioned volume available using
+ // the preferred topologies in order from first to last.
+ //
+ // If requisite is specified, all topologies in preferred list MUST
+ // also be present in the list of requisite topologies.
+ //
+ // If the SP is unable to to make the provisioned volume available
+ // from any of the preferred topologies, the SP MAY choose a topology
+ // from the list of requisite topologies.
+ // If the list of requisite topologies is not specified, then the SP
+ // MAY choose from the list of all possible topologies.
+ // If the list of requisite topologies is specified and the SP is + // unable to to make the provisioned volume available from any of the + // requisite topologies it MUST fail the CreateVolume call. + // + // Example 1: + // Given a volume should be accessible from a single zone, and + // requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"} + // preferred = + // {"region": "R1", "zone": "Z3"} + // then the the SP SHOULD first attempt to make the provisioned volume + // available from "zone" "Z3" in the "region" "R1" and fall back to + // "zone" "Z2" in the "region" "R1" if that is not possible. + // + // Example 2: + // Given a volume should be accessible from a single zone, and + // requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"}, + // {"region": "R1", "zone": "Z4"}, + // {"region": "R1", "zone": "Z5"} + // preferred = + // {"region": "R1", "zone": "Z4"}, + // {"region": "R1", "zone": "Z2"} + // then the the SP SHOULD first attempt to make the provisioned volume + // accessible from "zone" "Z4" in the "region" "R1" and fall back to + // "zone" "Z2" in the "region" "R1" if that is not possible. If that + // is not possible, the SP may choose between either the "zone" + // "Z3" or "Z5" in the "region" "R1". + // + // Example 3: + // Given a volume should be accessible from TWO zones (because an + // opaque parameter in CreateVolumeRequest, for example, specifies + // the volume is accessible from two zones, aka synchronously + // replicated), and + // requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"}, + // {"region": "R1", "zone": "Z4"}, + // {"region": "R1", "zone": "Z5"} + // preferred = + // {"region": "R1", "zone": "Z5"}, + // {"region": "R1", "zone": "Z3"} + // then the the SP SHOULD first attempt to make the provisioned volume + // accessible from the combination of the two "zones" "Z5" and "Z3" in + // the "region" "R1". If that's not possible, it should fall back to + // a combination of "Z5" and other possibilities from the list of + // requisite. If that's not possible, it should fall back to a + // combination of "Z3" and other possibilities from the list of + // requisite. If that's not possible, it should fall back to a + // combination of other possibilities from the list of requisite. + Preferred []Topology `json:",omitempty"` +} + +// Topology is a map of topological domains to topological segments. +// +// This description is taken verbatim from the CSI Spec: +// +// A topological domain is a sub-division of a cluster, like "region", +// "zone", "rack", etc. +// A topological segment is a specific instance of a topological domain, +// like "zone3", "rack3", etc. +// For example {"com.company/zone": "Z1", "com.company/rack": "R3"} +// Valid keys have two segments: an OPTIONAL prefix and name, separated +// by a slash (/), for example: "com.company.example/zone". +// The key name segment is REQUIRED. The prefix is OPTIONAL. +// The key name MUST be 63 characters or less, begin and end with an +// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-), +// underscores (_), dots (.), or alphanumerics in between, for example +// "zone". +// The key prefix MUST be 63 characters or less, begin and end with a +// lower-case alphanumeric character ([a-z0-9]), contain only +// dashes (-), dots (.), or lower-case alphanumerics in between, and +// follow domain name notation format +// (https://tools.ietf.org/html/rfc1035#section-2.3.1). 
+// The key prefix SHOULD include the plugin's host company name and/or +// the plugin name, to minimize the possibility of collisions with keys +// from other plugins. +// If a key prefix is specified, it MUST be identical across all +// topology keys returned by the SP (across all RPCs). +// Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone" +// MUST not both exist. +// Each value (topological segment) MUST contain 1 or more strings. +// Each string MUST be 63 characters or less and begin and end with an +// alphanumeric character with '-', '_', '.', or alphanumerics in +// between. +type Topology struct { + Segments map[string]string `json:",omitempty"` +} + +// CapacityRange describes the minimum and maximum capacity a volume should be +// created with +type CapacityRange struct { + // RequiredBytes specifies that a volume must be at least this big. The + // value of 0 indicates an unspecified minimum. + RequiredBytes int64 + + // LimitBytes specifies that a volume must not be bigger than this. The + // value of 0 indicates an unspecified maximum + LimitBytes int64 +} + +// Secret represents a Swarm Secret value that must be passed to the CSI +// storage plugin when operating on this Volume. It represents one key-value +// pair of possibly many. +type Secret struct { + // Key is the name of the key of the key-value pair passed to the plugin. + Key string + + // Secret is the swarm Secret object from which to read data. This can be a + // Secret name or ID. The Secret data is retrieved by Swarm and used as the + // value of the key-value pair passed to the plugin. + Secret string +} + +// PublishState represents the state of a Volume as it pertains to its +// use on a particular Node. +type PublishState string + +const ( + // StatePending indicates that the volume should be published on + // this node, but the call to ControllerPublishVolume has not been + // successfully completed yet and the result recorded by swarmkit. + StatePending PublishState = "pending-publish" + + // StatePublished means the volume is published successfully to the node. + StatePublished PublishState = "published" + + // StatePendingNodeUnpublish indicates that the Volume should be + // unpublished on the Node, and we're waiting for confirmation that it has + // done so. After the Node has confirmed that the Volume has been + // unpublished, the state will move to StatePendingUnpublish. + StatePendingNodeUnpublish PublishState = "pending-node-unpublish" + + // StatePendingUnpublish means the volume is still published to the node + // by the controller, awaiting the operation to unpublish it. + StatePendingUnpublish PublishState = "pending-controller-unpublish" +) + +// PublishStatus represents the status of the volume as published to an +// individual node +type PublishStatus struct { + // NodeID is the ID of the swarm node this Volume is published to. + NodeID string `json:",omitempty"` + + // State is the publish state of the volume. + State PublishState `json:",omitempty"` + + // PublishContext is the PublishContext returned by the CSI plugin when + // a volume is published. + PublishContext map[string]string `json:",omitempty"` +} + +// Info contains information about the Volume as a whole as provided by +// the CSI storage plugin. +type Info struct { + // CapacityBytes is the capacity of the volume in bytes. A value of 0 + // indicates that the capacity is unknown. 
+ CapacityBytes int64 `json:",omitempty"` + + // VolumeContext is the context originating from the CSI storage plugin + // when the Volume is created. + VolumeContext map[string]string `json:",omitempty"` + + // VolumeID is the ID of the Volume as seen by the CSI storage plugin. This + // is distinct from the Volume's Swarm ID, which is the ID used by all of + // the Docker Engine to refer to the Volume. If this field is blank, then + // the Volume has not been successfully created yet. + VolumeID string `json:",omitempty"` + + // AccessibleTopolgoy is the topology this volume is actually accessible + // from. + AccessibleTopology []Topology `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/volume/create_options.go b/vendor/github.com/docker/docker/api/types/volume/create_options.go new file mode 100644 index 0000000000..37c41a6096 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume/create_options.go @@ -0,0 +1,29 @@ +package volume + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// CreateOptions VolumeConfig +// +// Volume configuration +// swagger:model CreateOptions +type CreateOptions struct { + + // cluster volume spec + ClusterVolumeSpec *ClusterVolumeSpec `json:"ClusterVolumeSpec,omitempty"` + + // Name of the volume driver to use. + Driver string `json:"Driver,omitempty"` + + // A mapping of driver options and values. These options are + // passed directly to the driver and are driver specific. + // + DriverOpts map[string]string `json:"DriverOpts,omitempty"` + + // User-defined key/value metadata. + Labels map[string]string `json:"Labels,omitempty"` + + // The new volume's name. If not specified, Docker generates a name. + // + Name string `json:"Name,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/volume/deprecated.go b/vendor/github.com/docker/docker/api/types/volume/deprecated.go new file mode 100644 index 0000000000..ab622d8ccb --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume/deprecated.go @@ -0,0 +1,11 @@ +package volume // import "github.com/docker/docker/api/types/volume" + +// VolumeCreateBody Volume configuration +// +// Deprecated: use CreateOptions +type VolumeCreateBody = CreateOptions + +// VolumeListOKBody Volume list response +// +// Deprecated: use ListResponse +type VolumeListOKBody = ListResponse diff --git a/vendor/github.com/docker/docker/api/types/volume/list_response.go b/vendor/github.com/docker/docker/api/types/volume/list_response.go new file mode 100644 index 0000000000..ca5192a2a9 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume/list_response.go @@ -0,0 +1,18 @@ +package volume + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ListResponse VolumeListResponse +// +// Volume list response +// swagger:model ListResponse +type ListResponse struct { + + // List of volumes + Volumes []*Volume `json:"Volumes"` + + // Warnings that occurred when fetching the list of volumes. 
+ // + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/docker/docker/api/types/volume/options.go b/vendor/github.com/docker/docker/api/types/volume/options.go new file mode 100644 index 0000000000..8b0dd13899 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume/options.go @@ -0,0 +1,8 @@ +package volume // import "github.com/docker/docker/api/types/volume" + +import "github.com/docker/docker/api/types/filters" + +// ListOptions holds parameters to list volumes. +type ListOptions struct { + Filters filters.Args +} diff --git a/vendor/github.com/docker/docker/api/types/volume.go b/vendor/github.com/docker/docker/api/types/volume/volume.go similarity index 87% rename from vendor/github.com/docker/docker/api/types/volume.go rename to vendor/github.com/docker/docker/api/types/volume/volume.go index c69b08448d..ea7d555e5b 100644 --- a/vendor/github.com/docker/docker/api/types/volume.go +++ b/vendor/github.com/docker/docker/api/types/volume/volume.go @@ -1,4 +1,4 @@ -package types +package volume // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command @@ -7,6 +7,9 @@ package types // swagger:model Volume type Volume struct { + // cluster volume + ClusterVolume *ClusterVolume `json:"ClusterVolume,omitempty"` + // Date/Time the volume was created. CreatedAt string `json:"CreatedAt,omitempty"` @@ -47,14 +50,14 @@ type Volume struct { Status map[string]interface{} `json:"Status,omitempty"` // usage data - UsageData *VolumeUsageData `json:"UsageData,omitempty"` + UsageData *UsageData `json:"UsageData,omitempty"` } -// VolumeUsageData Usage details about the volume. This information is used by the +// UsageData Usage details about the volume. This information is used by the // `GET /system/df` endpoint, and omitted in other endpoints. // -// swagger:model VolumeUsageData -type VolumeUsageData struct { +// swagger:model UsageData +type UsageData struct { // The number of containers referencing this volume. This field // is set to `-1` if the reference-count is not available. diff --git a/vendor/github.com/docker/docker/api/types/volume/volume_create.go b/vendor/github.com/docker/docker/api/types/volume/volume_create.go deleted file mode 100644 index 8538078dd6..0000000000 --- a/vendor/github.com/docker/docker/api/types/volume/volume_create.go +++ /dev/null @@ -1,31 +0,0 @@ -package volume // import "github.com/docker/docker/api/types/volume" - -// ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// VolumeCreateBody Volume configuration -// swagger:model VolumeCreateBody -type VolumeCreateBody struct { - - // Name of the volume driver to use. - // Required: true - Driver string `json:"Driver"` - - // A mapping of driver options and values. These options are - // passed directly to the driver and are driver specific. - // - // Required: true - DriverOpts map[string]string `json:"DriverOpts"` - - // User-defined key/value metadata. - // Required: true - Labels map[string]string `json:"Labels"` - - // The new volume's name. If not specified, Docker generates a name. 
- // - // Required: true - Name string `json:"Name"` -} diff --git a/vendor/github.com/docker/docker/api/types/volume/volume_list.go b/vendor/github.com/docker/docker/api/types/volume/volume_list.go deleted file mode 100644 index be06179bf4..0000000000 --- a/vendor/github.com/docker/docker/api/types/volume/volume_list.go +++ /dev/null @@ -1,23 +0,0 @@ -package volume // import "github.com/docker/docker/api/types/volume" - -// ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -import "github.com/docker/docker/api/types" - -// VolumeListOKBody Volume list response -// swagger:model VolumeListOKBody -type VolumeListOKBody struct { - - // List of volumes - // Required: true - Volumes []*types.Volume `json:"Volumes"` - - // Warnings that occurred when fetching the list of volumes. - // - // Required: true - Warnings []string `json:"Warnings"` -} diff --git a/vendor/github.com/docker/docker/api/types/volume/volume_update.go b/vendor/github.com/docker/docker/api/types/volume/volume_update.go new file mode 100644 index 0000000000..f958f80a66 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume/volume_update.go @@ -0,0 +1,7 @@ +package volume // import "github.com/docker/docker/api/types/volume" + +// UpdateOptions is configuration to update a Volume with. +type UpdateOptions struct { + // Spec is the ClusterVolumeSpec to update the volume to. + Spec *ClusterVolumeSpec `json:"Spec,omitempty"` +} diff --git a/vendor/github.com/docker/docker/client/build_cancel.go b/vendor/github.com/docker/docker/client/build_cancel.go index 3aae43e3d1..b76bf366bb 100644 --- a/vendor/github.com/docker/docker/client/build_cancel.go +++ b/vendor/github.com/docker/docker/client/build_cancel.go @@ -5,7 +5,7 @@ import ( "net/url" ) -// BuildCancel requests the daemon to cancel ongoing build request +// BuildCancel requests the daemon to cancel the ongoing build request. func (cli *Client) BuildCancel(ctx context.Context, id string) error { query := url.Values{} query.Set("id", id) diff --git a/vendor/github.com/docker/docker/client/checkpoint_list.go b/vendor/github.com/docker/docker/client/checkpoint_list.go index 66d46dd161..39cfb959ff 100644 --- a/vendor/github.com/docker/docker/client/checkpoint_list.go +++ b/vendor/github.com/docker/docker/client/checkpoint_list.go @@ -20,7 +20,7 @@ func (cli *Client) CheckpointList(ctx context.Context, container string, options resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil) defer ensureReaderClosed(resp) if err != nil { - return checkpoints, wrapResponseError(err, resp, "container", container) + return checkpoints, err } err = json.NewDecoder(resp.body).Decode(&checkpoints) diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go index 9b2b2eaeb8..26a0fa2756 100644 --- a/vendor/github.com/docker/docker/client/client.go +++ b/vendor/github.com/docker/docker/client/client.go @@ -4,7 +4,7 @@ Package client is a Go client for the Docker Engine API. 
For more information about the Engine API, see the documentation: https://docs.docker.com/engine/api/ -Usage +# Usage You use the library by creating a client object and calling methods on it. The client can be created either from environment variables with NewClientWithOpts(client.FromEnv), @@ -37,13 +37,11 @@ For example, to list running containers (the equivalent of "docker ps"): fmt.Printf("%s %s\n", container.ID[:10], container.Image) } } - */ package client // import "github.com/docker/docker/client" import ( "context" - "fmt" "net" "net/http" "net/url" @@ -93,15 +91,18 @@ type Client struct { } // CheckRedirect specifies the policy for dealing with redirect responses: -// If the request is non-GET return `ErrRedirect`. Otherwise use the last response. +// If the request is non-GET return ErrRedirect, otherwise use the last response. +// +// Go 1.8 changes behavior for HTTP redirects (specifically 301, 307, and 308) +// in the client. The Docker client (and by extension docker API client) can be +// made to send a request like POST /containers//start where what would normally +// be in the name section of the URL is empty. This triggers an HTTP 301 from +// the daemon. // -// Go 1.8 changes behavior for HTTP redirects (specifically 301, 307, and 308) in the client . -// The Docker client (and by extension docker API client) can be made to send a request -// like POST /containers//start where what would normally be in the name section of the URL is empty. -// This triggers an HTTP 301 from the daemon. -// In go 1.8 this 301 will be converted to a GET request, and ends up getting a 404 from the daemon. -// This behavior change manifests in the client in that before the 301 was not followed and -// the client did not generate an error, but now results in a message like Error response from daemon: page not found. +// In go 1.8 this 301 will be converted to a GET request, and ends up getting +// a 404 from the daemon. This behavior change manifests in the client in that +// before, the 301 was not followed and the client did not generate an error, +// but now results in a message like Error response from daemon: page not found. func CheckRedirect(req *http.Request, via []*http.Request) error { if via[0].Method == http.MethodGet { return http.ErrUseLastResponse @@ -109,13 +110,20 @@ func CheckRedirect(req *http.Request, via []*http.Request) error { return ErrRedirect } -// NewClientWithOpts initializes a new API client with default values. It takes functors -// to modify values when creating it, like `NewClientWithOpts(WithVersion(…))` -// It also initializes the custom http headers to add to each request. +// NewClientWithOpts initializes a new API client with a default HTTPClient, and +// default API host and version. It also initializes the custom HTTP headers to +// add to each request. // -// It won't send any version information if the version number is empty. It is -// highly recommended that you set a version or your client may break if the -// server is upgraded. +// It takes an optional list of Opt functional arguments, which are applied in +// the order they're provided, which allows modifying the defaults when creating +// the client. For example, the following initializes a client that configures +// itself with values from environment variables (client.FromEnv), and has +// automatic API version negotiation enabled (client.WithAPIVersionNegotiation()). 
+// +// cli, err := client.NewClientWithOpts( +// client.FromEnv, +// client.WithAPIVersionNegotiation(), +// ) func NewClientWithOpts(ops ...Opt) (*Client, error) { client, err := defaultHTTPClient(DefaultDockerHost) if err != nil { @@ -153,12 +161,12 @@ func NewClientWithOpts(ops ...Opt) (*Client, error) { } func defaultHTTPClient(host string) (*http.Client, error) { - url, err := ParseHostURL(host) + hostURL, err := ParseHostURL(host) if err != nil { return nil, err } - transport := new(http.Transport) - sockets.ConfigureTransport(transport, url.Scheme, url.Host) + transport := &http.Transport{} + _ = sockets.ConfigureTransport(transport, hostURL.Scheme, hostURL.Host) return &http.Client{ Transport: transport, CheckRedirect: CheckRedirect, @@ -194,11 +202,21 @@ func (cli *Client) ClientVersion() string { return cli.version } -// NegotiateAPIVersion queries the API and updates the version to match the -// API version. Any errors are silently ignored. If a manual override is in place, -// either through the `DOCKER_API_VERSION` environment variable, or if the client -// was initialized with a fixed version (`opts.WithVersion(xx)`), no negotiation -// will be performed. +// NegotiateAPIVersion queries the API and updates the version to match the API +// version. NegotiateAPIVersion downgrades the client's API version to match the +// APIVersion if the ping version is lower than the default version. If the API +// version reported by the server is higher than the maximum version supported +// by the client, it uses the client's maximum version. +// +// If a manual override is in place, either through the "DOCKER_API_VERSION" +// (EnvOverrideAPIVersion) environment variable, or if the client is initialized +// with a fixed version (WithVersion(xx)), no negotiation is performed. +// +// If the API server's ping response does not contain an API version, or if the +// client did not get a successful ping response, it assumes it is connected with +// an old daemon that does not support API version negotiation, in which case it +// downgrades to the latest version of the API before version negotiation was +// added (1.24). func (cli *Client) NegotiateAPIVersion(ctx context.Context) { if !cli.manualOverride { ping, _ := cli.Ping(ctx) @@ -206,23 +224,31 @@ func (cli *Client) NegotiateAPIVersion(ctx context.Context) { } } -// NegotiateAPIVersionPing updates the client version to match the Ping.APIVersion -// if the ping version is less than the default version. If a manual override is -// in place, either through the `DOCKER_API_VERSION` environment variable, or if -// the client was initialized with a fixed version (`opts.WithVersion(xx)`), no -// negotiation is performed. -func (cli *Client) NegotiateAPIVersionPing(p types.Ping) { +// NegotiateAPIVersionPing downgrades the client's API version to match the +// APIVersion in the ping response. If the API version in pingResponse is higher +// than the maximum version supported by the client, it uses the client's maximum +// version. +// +// If a manual override is in place, either through the "DOCKER_API_VERSION" +// (EnvOverrideAPIVersion) environment variable, or if the client is initialized +// with a fixed version (WithVersion(xx)), no negotiation is performed. +// +// If the API server's ping response does not contain an API version, we assume +// we are connected with an old daemon without API version negotiation support, +// and downgrade to the latest version of the API before version negotiation was +// added (1.24). 
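For illustration only (not part of the patch): explicit negotiation for a client that was not constructed with client.WithAPIVersionNegotiation() might look like this, assuming a reachable daemon and the canonical github.com/docker/docker import path:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Downgrades the client's API version if the daemon reports a lower one;
	// a no-op when DOCKER_API_VERSION or WithVersion pins the version.
	cli.NegotiateAPIVersion(context.Background())
	fmt.Println("negotiated API version:", cli.ClientVersion())
}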
+func (cli *Client) NegotiateAPIVersionPing(pingResponse types.Ping) { if !cli.manualOverride { - cli.negotiateAPIVersionPing(p) + cli.negotiateAPIVersionPing(pingResponse) } } // negotiateAPIVersionPing queries the API and updates the version to match the -// API version. Any errors are silently ignored. -func (cli *Client) negotiateAPIVersionPing(p types.Ping) { - // try the latest version before versioning headers existed - if p.APIVersion == "" { - p.APIVersion = "1.24" +// API version from the ping response. +func (cli *Client) negotiateAPIVersionPing(pingResponse types.Ping) { + // default to the latest version before versioning headers existed + if pingResponse.APIVersion == "" { + pingResponse.APIVersion = "1.24" } // if the client is not initialized with a version, start with the latest supported version @@ -231,8 +257,8 @@ func (cli *Client) negotiateAPIVersionPing(p types.Ping) { } // if server version is lower than the client version, downgrade - if versions.LessThan(p.APIVersion, cli.version) { - cli.version = p.APIVersion + if versions.LessThan(pingResponse.APIVersion, cli.version) { + cli.version = pingResponse.APIVersion } // Store the results, so that automatic API version negotiation (if enabled) @@ -258,7 +284,7 @@ func (cli *Client) HTTPClient() *http.Client { func ParseHostURL(host string) (*url.URL, error) { protoAddrParts := strings.SplitN(host, "://", 2) if len(protoAddrParts) == 1 { - return nil, fmt.Errorf("unable to parse docker host `%s`", host) + return nil, errors.Errorf("unable to parse docker host `%s`", host) } var basePath string @@ -278,22 +304,9 @@ func ParseHostURL(host string) (*url.URL, error) { }, nil } -// CustomHTTPHeaders returns the custom http headers stored by the client. -func (cli *Client) CustomHTTPHeaders() map[string]string { - m := make(map[string]string) - for k, v := range cli.customHTTPHeaders { - m[k] = v - } - return m -} - -// SetCustomHTTPHeaders that will be set on every HTTP request made by the client. -// Deprecated: use WithHTTPHeaders when creating the client. -func (cli *Client) SetCustomHTTPHeaders(headers map[string]string) { - cli.customHTTPHeaders = headers -} - -// Dialer returns a dialer for a raw stream connection, with HTTP/1.1 header, that can be used for proxying the daemon connection. +// Dialer returns a dialer for a raw stream connection, with an HTTP/1.1 header, +// that can be used for proxying the daemon connection. +// // Used by `docker dial-stdio` (docker/cli#889). func (cli *Client) Dialer() func(context.Context) (net.Conn, error) { return func(ctx context.Context) (net.Conn, error) { diff --git a/vendor/github.com/docker/docker/client/client_unix.go b/vendor/github.com/docker/docker/client/client_unix.go index 5846f888fe..f0783f7085 100644 --- a/vendor/github.com/docker/docker/client/client_unix.go +++ b/vendor/github.com/docker/docker/client/client_unix.go @@ -3,7 +3,8 @@ package client // import "github.com/docker/docker/client" -// DefaultDockerHost defines os specific default if DOCKER_HOST is unset +// DefaultDockerHost defines OS-specific default host if the DOCKER_HOST +// (EnvOverrideHost) environment variable is unset or empty. 
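A small usage sketch (not from the patch) of the exported ParseHostURL helper touched above; the address is an arbitrary example:

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/client"
)

func main() {
	// Hosts must include a scheme ("tcp://", "unix://", "npipe://", ...);
	// a bare "127.0.0.1:2376" is rejected with an error.
	u, err := client.ParseHostURL("tcp://127.0.0.1:2376")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(u.Scheme, u.Host) // tcp 127.0.0.1:2376
}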
const DefaultDockerHost = "unix:///var/run/docker.sock" const defaultProto = "unix" diff --git a/vendor/github.com/docker/docker/client/client_windows.go b/vendor/github.com/docker/docker/client/client_windows.go index c649e54412..5abe60457d 100644 --- a/vendor/github.com/docker/docker/client/client_windows.go +++ b/vendor/github.com/docker/docker/client/client_windows.go @@ -1,6 +1,7 @@ package client // import "github.com/docker/docker/client" -// DefaultDockerHost defines os specific default if DOCKER_HOST is unset +// DefaultDockerHost defines OS-specific default host if the DOCKER_HOST +// (EnvOverrideHost) environment variable is unset or empty. const DefaultDockerHost = "npipe:////./pipe/docker_engine" const defaultProto = "npipe" diff --git a/vendor/github.com/docker/docker/client/config_create.go b/vendor/github.com/docker/docker/client/config_create.go index ee7d411df0..f6b1881fc3 100644 --- a/vendor/github.com/docker/docker/client/config_create.go +++ b/vendor/github.com/docker/docker/client/config_create.go @@ -8,7 +8,7 @@ import ( "github.com/docker/docker/api/types/swarm" ) -// ConfigCreate creates a new Config. +// ConfigCreate creates a new config. func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) { var response types.ConfigCreateResponse if err := cli.NewVersionError("1.30", "config create"); err != nil { diff --git a/vendor/github.com/docker/docker/client/config_inspect.go b/vendor/github.com/docker/docker/client/config_inspect.go index 7d0ce3e11c..9be7882c3d 100644 --- a/vendor/github.com/docker/docker/client/config_inspect.go +++ b/vendor/github.com/docker/docker/client/config_inspect.go @@ -4,7 +4,7 @@ import ( "bytes" "context" "encoding/json" - "io/ioutil" + "io" "github.com/docker/docker/api/types/swarm" ) @@ -20,10 +20,10 @@ func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.C resp, err := cli.get(ctx, "/configs/"+id, nil, nil) defer ensureReaderClosed(resp) if err != nil { - return swarm.Config{}, nil, wrapResponseError(err, resp, "config", id) + return swarm.Config{}, nil, err } - body, err := ioutil.ReadAll(resp.body) + body, err := io.ReadAll(resp.body) if err != nil { return swarm.Config{}, nil, err } diff --git a/vendor/github.com/docker/docker/client/config_remove.go b/vendor/github.com/docker/docker/client/config_remove.go index a708fcaecf..24b94e9c18 100644 --- a/vendor/github.com/docker/docker/client/config_remove.go +++ b/vendor/github.com/docker/docker/client/config_remove.go @@ -2,12 +2,12 @@ package client // import "github.com/docker/docker/client" import "context" -// ConfigRemove removes a Config. +// ConfigRemove removes a config. 
func (cli *Client) ConfigRemove(ctx context.Context, id string) error { if err := cli.NewVersionError("1.30", "config remove"); err != nil { return err } resp, err := cli.delete(ctx, "/configs/"+id, nil, nil) defer ensureReaderClosed(resp) - return wrapResponseError(err, resp, "config", id) + return err } diff --git a/vendor/github.com/docker/docker/client/config_update.go b/vendor/github.com/docker/docker/client/config_update.go index 39e59cf858..1ac2985435 100644 --- a/vendor/github.com/docker/docker/client/config_update.go +++ b/vendor/github.com/docker/docker/client/config_update.go @@ -3,18 +3,17 @@ package client // import "github.com/docker/docker/client" import ( "context" "net/url" - "strconv" "github.com/docker/docker/api/types/swarm" ) -// ConfigUpdate attempts to update a Config +// ConfigUpdate attempts to update a config func (cli *Client) ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error { if err := cli.NewVersionError("1.30", "config update"); err != nil { return err } query := url.Values{} - query.Set("version", strconv.FormatUint(version.Index, 10)) + query.Set("version", version.String()) resp, err := cli.post(ctx, "/configs/"+id+"/update", query, config, nil) ensureReaderClosed(resp) return err diff --git a/vendor/github.com/docker/docker/client/container_attach.go b/vendor/github.com/docker/docker/client/container_attach.go index 88ba1ef639..ba92117d3e 100644 --- a/vendor/github.com/docker/docker/client/container_attach.go +++ b/vendor/github.com/docker/docker/client/container_attach.go @@ -22,7 +22,7 @@ import ( // multiplexed. // The format of the multiplexed stream is as follows: // -// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} +// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} // // STREAM_TYPE can be 1 for stdout and 2 for stderr // @@ -52,6 +52,8 @@ func (cli *Client) ContainerAttach(ctx context.Context, container string, option query.Set("logs", "1") } - headers := map[string][]string{"Content-Type": {"text/plain"}} + headers := map[string][]string{ + "Content-Type": {"text/plain"}, + } return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, headers) } diff --git a/vendor/github.com/docker/docker/client/container_commit.go b/vendor/github.com/docker/docker/client/container_commit.go index 2966e88c8e..cd7f763464 100644 --- a/vendor/github.com/docker/docker/client/container_commit.go +++ b/vendor/github.com/docker/docker/client/container_commit.go @@ -10,7 +10,7 @@ import ( "github.com/docker/docker/api/types" ) -// ContainerCommit applies changes into a container and creates a new tagged image. +// ContainerCommit applies changes to a container and creates a new tagged image. 
func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) { var repository, tag string if options.Reference != "" { diff --git a/vendor/github.com/docker/docker/client/container_copy.go b/vendor/github.com/docker/docker/client/container_copy.go index bb278bf7f3..883be7fa34 100644 --- a/vendor/github.com/docker/docker/client/container_copy.go +++ b/vendor/github.com/docker/docker/client/container_copy.go @@ -14,7 +14,7 @@ import ( "github.com/docker/docker/api/types" ) -// ContainerStatPath returns Stat information about a path inside the container filesystem. +// ContainerStatPath returns stat information about a path inside the container filesystem. func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) { query := url.Values{} query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. @@ -23,7 +23,7 @@ func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path stri response, err := cli.head(ctx, urlStr, query, nil) defer ensureReaderClosed(response) if err != nil { - return types.ContainerPathStat{}, wrapResponseError(err, response, "container:path", containerID+":"+path) + return types.ContainerPathStat{}, err } return getContainerPathStatFromHeader(response.header) } @@ -47,12 +47,7 @@ func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath str response, err := cli.putRaw(ctx, apiPath, query, content, nil) defer ensureReaderClosed(response) if err != nil { - return wrapResponseError(err, response, "container:path", containerID+":"+dstPath) - } - - // TODO this code converts non-error status-codes (e.g., "204 No Content") into an error; verify if this is the desired behavior - if response.statusCode != http.StatusOK { - return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) + return err } return nil @@ -67,12 +62,7 @@ func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath s apiPath := "/containers/" + containerID + "/archive" response, err := cli.get(ctx, apiPath, query, nil) if err != nil { - return nil, types.ContainerPathStat{}, wrapResponseError(err, response, "container:path", containerID+":"+srcPath) - } - - // TODO this code converts non-error status-codes (e.g., "204 No Content") into an error; verify if this is the desired behavior - if response.statusCode != http.StatusOK { - return nil, types.ContainerPathStat{}, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) + return nil, types.ContainerPathStat{}, err } // In order to get the copy behavior right, we need to know information diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go index c5079ee539..f82420b673 100644 --- a/vendor/github.com/docker/docker/client/container_create.go +++ b/vendor/github.com/docker/docker/client/container_create.go @@ -18,24 +18,33 @@ type configWrapper struct { NetworkingConfig *network.NetworkingConfig } -// ContainerCreate creates a new container based in the given configuration. +// ContainerCreate creates a new container based on the given configuration. // It can be associated with a name, but it's not mandatory. 
-func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *specs.Platform, containerName string) (container.ContainerCreateCreatedBody, error) { - var response container.ContainerCreateCreatedBody +func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *specs.Platform, containerName string) (container.CreateResponse, error) { + var response container.CreateResponse if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil { return response, err } - - // When using API 1.24 and under, the client is responsible for removing the container - if hostConfig != nil && versions.LessThan(cli.ClientVersion(), "1.25") { - hostConfig.AutoRemove = false - } - if err := cli.NewVersionError("1.41", "specify container image platform"); platform != nil && err != nil { return response, err } + if hostConfig != nil { + if versions.LessThan(cli.ClientVersion(), "1.25") { + // When using API 1.24 and under, the client is responsible for removing the container + hostConfig.AutoRemove = false + } + if versions.GreaterThanOrEqualTo(cli.ClientVersion(), "1.42") || versions.LessThan(cli.ClientVersion(), "1.40") { + // KernelMemory was added in API 1.40, and deprecated in API 1.42 + hostConfig.KernelMemory = 0 + } + if platform != nil && platform.OS == "linux" && versions.LessThan(cli.ClientVersion(), "1.42") { + // When using API under 1.42, the Linux daemon doesn't respect the ConsoleSize + hostConfig.ConsoleSize = [2]uint{0, 0} + } + } + query := url.Values{} if p := formatPlatform(platform); p != "" { query.Set("platform", p) diff --git a/vendor/github.com/docker/docker/client/container_exec.go b/vendor/github.com/docker/docker/client/container_exec.go index e3ee755b71..6a2cb006f8 100644 --- a/vendor/github.com/docker/docker/client/container_exec.go +++ b/vendor/github.com/docker/docker/client/container_exec.go @@ -5,6 +5,7 @@ import ( "encoding/json" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" ) // ContainerExecCreate creates a new exec configuration to run an exec process. @@ -14,6 +15,9 @@ func (cli *Client) ContainerExecCreate(ctx context.Context, container string, co if err := cli.NewVersionError("1.25", "env"); len(config.Env) != 0 && err != nil { return response, err } + if versions.LessThan(cli.ClientVersion(), "1.42") { + config.ConsoleSize = nil + } resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil) defer ensureReaderClosed(resp) @@ -26,6 +30,9 @@ func (cli *Client) ContainerExecCreate(ctx context.Context, container string, co // ContainerExecStart starts an exec process already created in the docker host. func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error { + if versions.LessThan(cli.ClientVersion(), "1.42") { + config.ConsoleSize = nil + } resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil) ensureReaderClosed(resp) return err @@ -36,7 +43,12 @@ func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config // and the a reader to get output. It's up to the called to close // the hijacked connection by calling types.HijackedResponse.Close. 
func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) { - headers := map[string][]string{"Content-Type": {"application/json"}} + if versions.LessThan(cli.ClientVersion(), "1.42") { + config.ConsoleSize = nil + } + headers := map[string][]string{ + "Content-Type": {"application/json"}, + } return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers) } diff --git a/vendor/github.com/docker/docker/client/container_inspect.go b/vendor/github.com/docker/docker/client/container_inspect.go index c496bcffea..d48f0d3a68 100644 --- a/vendor/github.com/docker/docker/client/container_inspect.go +++ b/vendor/github.com/docker/docker/client/container_inspect.go @@ -4,7 +4,7 @@ import ( "bytes" "context" "encoding/json" - "io/ioutil" + "io" "net/url" "github.com/docker/docker/api/types" @@ -18,7 +18,7 @@ func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (ty serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil) defer ensureReaderClosed(serverResp) if err != nil { - return types.ContainerJSON{}, wrapResponseError(err, serverResp, "container", containerID) + return types.ContainerJSON{}, err } var response types.ContainerJSON @@ -38,10 +38,10 @@ func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID stri serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil) defer ensureReaderClosed(serverResp) if err != nil { - return types.ContainerJSON{}, nil, wrapResponseError(err, serverResp, "container", containerID) + return types.ContainerJSON{}, nil, err } - body, err := ioutil.ReadAll(serverResp.body) + body, err := io.ReadAll(serverResp.body) if err != nil { return types.ContainerJSON{}, nil, err } diff --git a/vendor/github.com/docker/docker/client/container_kill.go b/vendor/github.com/docker/docker/client/container_kill.go index 4d6f1d23da..7c9529f1e1 100644 --- a/vendor/github.com/docker/docker/client/container_kill.go +++ b/vendor/github.com/docker/docker/client/container_kill.go @@ -8,7 +8,9 @@ import ( // ContainerKill terminates the container process but does not remove the container from the docker host. func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error { query := url.Values{} - query.Set("signal", signal) + if signal != "" { + query.Set("signal", signal) + } resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil) ensureReaderClosed(resp) diff --git a/vendor/github.com/docker/docker/client/container_list.go b/vendor/github.com/docker/docker/client/container_list.go index a973de597f..bd491b3db9 100644 --- a/vendor/github.com/docker/docker/client/container_list.go +++ b/vendor/github.com/docker/docker/client/container_list.go @@ -18,7 +18,7 @@ func (cli *Client) ContainerList(ctx context.Context, options types.ContainerLis query.Set("all", "1") } - if options.Limit != -1 { + if options.Limit > 0 { query.Set("limit", strconv.Itoa(options.Limit)) } diff --git a/vendor/github.com/docker/docker/client/container_logs.go b/vendor/github.com/docker/docker/client/container_logs.go index 5b6541f035..9bdf2b0fa6 100644 --- a/vendor/github.com/docker/docker/client/container_logs.go +++ b/vendor/github.com/docker/docker/client/container_logs.go @@ -24,7 +24,7 @@ import ( // multiplexed. 
// The format of the multiplexed stream is as follows: // -// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} +// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} // // STREAM_TYPE can be 1 for stdout and 2 for stderr // @@ -74,7 +74,7 @@ func (cli *Client) ContainerLogs(ctx context.Context, container string, options resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil) if err != nil { - return nil, wrapResponseError(err, resp, "container", container) + return nil, err } return resp.body, nil } diff --git a/vendor/github.com/docker/docker/client/container_remove.go b/vendor/github.com/docker/docker/client/container_remove.go index df81461b88..c21de609b0 100644 --- a/vendor/github.com/docker/docker/client/container_remove.go +++ b/vendor/github.com/docker/docker/client/container_remove.go @@ -23,5 +23,5 @@ func (cli *Client) ContainerRemove(ctx context.Context, containerID string, opti resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil) defer ensureReaderClosed(resp) - return wrapResponseError(err, resp, "container", containerID) + return err } diff --git a/vendor/github.com/docker/docker/client/container_restart.go b/vendor/github.com/docker/docker/client/container_restart.go index 41e421969f..1e0ad99981 100644 --- a/vendor/github.com/docker/docker/client/container_restart.go +++ b/vendor/github.com/docker/docker/client/container_restart.go @@ -3,18 +3,22 @@ package client // import "github.com/docker/docker/client" import ( "context" "net/url" - "time" + "strconv" - timetypes "github.com/docker/docker/api/types/time" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/versions" ) // ContainerRestart stops and starts a container again. -// It makes the daemon to wait for the container to be up again for +// It makes the daemon wait for the container to be up again for // a specific amount of time, given the timeout. -func (cli *Client) ContainerRestart(ctx context.Context, containerID string, timeout *time.Duration) error { +func (cli *Client) ContainerRestart(ctx context.Context, containerID string, options container.StopOptions) error { query := url.Values{} - if timeout != nil { - query.Set("t", timetypes.DurationToSecondsString(*timeout)) + if options.Timeout != nil { + query.Set("t", strconv.Itoa(*options.Timeout)) + } + if options.Signal != "" && versions.GreaterThanOrEqualTo(cli.version, "1.42") { + query.Set("signal", options.Signal) } resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil) ensureReaderClosed(resp) diff --git a/vendor/github.com/docker/docker/client/container_stop.go b/vendor/github.com/docker/docker/client/container_stop.go index 629d7ab64c..2a43ce2274 100644 --- a/vendor/github.com/docker/docker/client/container_stop.go +++ b/vendor/github.com/docker/docker/client/container_stop.go @@ -3,9 +3,10 @@ package client // import "github.com/docker/docker/client" import ( "context" "net/url" - "time" + "strconv" - timetypes "github.com/docker/docker/api/types/time" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/versions" ) // ContainerStop stops a container. In case the container fails to stop @@ -15,10 +16,13 @@ import ( // If the timeout is nil, the container's StopTimeout value is used, if set, // otherwise the engine default. 
A negative timeout value can be specified, // meaning no timeout, i.e. no forceful termination is performed. -func (cli *Client) ContainerStop(ctx context.Context, containerID string, timeout *time.Duration) error { +func (cli *Client) ContainerStop(ctx context.Context, containerID string, options container.StopOptions) error { query := url.Values{} - if timeout != nil { - query.Set("t", timetypes.DurationToSecondsString(*timeout)) + if options.Timeout != nil { + query.Set("t", strconv.Itoa(*options.Timeout)) + } + if options.Signal != "" && versions.GreaterThanOrEqualTo(cli.version, "1.42") { + query.Set("signal", options.Signal) } resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil) ensureReaderClosed(resp) diff --git a/vendor/github.com/docker/docker/client/container_update.go b/vendor/github.com/docker/docker/client/container_update.go index 6917cf9fb3..bf68a5300e 100644 --- a/vendor/github.com/docker/docker/client/container_update.go +++ b/vendor/github.com/docker/docker/client/container_update.go @@ -7,7 +7,7 @@ import ( "github.com/docker/docker/api/types/container" ) -// ContainerUpdate updates resources of a container +// ContainerUpdate updates the resources of a container. func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) { var response container.ContainerUpdateOKBody serverResp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil) diff --git a/vendor/github.com/docker/docker/client/container_wait.go b/vendor/github.com/docker/docker/client/container_wait.go index 6ab8c1da96..2375eb1e80 100644 --- a/vendor/github.com/docker/docker/client/container_wait.go +++ b/vendor/github.com/docker/docker/client/container_wait.go @@ -1,14 +1,19 @@ package client // import "github.com/docker/docker/client" import ( + "bytes" "context" "encoding/json" + "errors" + "io" "net/url" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/versions" ) +const containerWaitErrorMsgLimit = 2 * 1024 /* Max: 2KiB */ + // ContainerWait waits until the specified container is in a certain state // indicated by the given condition, either "not-running" (default), // "next-exit", or "removed". @@ -24,16 +29,18 @@ import ( // wait request or in getting the response. This allows the caller to // synchronize ContainerWait with other calls, such as specifying a // "next-exit" condition before issuing a ContainerStart request. 
-func (cli *Client) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error) { +func (cli *Client) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.WaitResponse, <-chan error) { if versions.LessThan(cli.ClientVersion(), "1.30") { return cli.legacyContainerWait(ctx, containerID) } - resultC := make(chan container.ContainerWaitOKBody) + resultC := make(chan container.WaitResponse) errC := make(chan error, 1) query := url.Values{} - query.Set("condition", string(condition)) + if condition != "" { + query.Set("condition", string(condition)) + } resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", query, nil, nil) if err != nil { @@ -44,9 +51,23 @@ func (cli *Client) ContainerWait(ctx context.Context, containerID string, condit go func() { defer ensureReaderClosed(resp) - var res container.ContainerWaitOKBody - if err := json.NewDecoder(resp.body).Decode(&res); err != nil { - errC <- err + + body := resp.body + responseText := bytes.NewBuffer(nil) + stream := io.TeeReader(body, responseText) + + var res container.WaitResponse + if err := json.NewDecoder(stream).Decode(&res); err != nil { + // NOTE(nicks): The /wait API does not work well with HTTP proxies. + // At any time, the proxy could cut off the response stream. + // + // But because the HTTP status has already been written, the proxy's + // only option is to write a plaintext error message. + // + // If there's a JSON parsing error, read the real error message + // off the body and send it to the client. + _, _ = io.ReadAll(io.LimitReader(stream, containerWaitErrorMsgLimit)) + errC <- errors.New(responseText.String()) return } @@ -58,8 +79,8 @@ func (cli *Client) ContainerWait(ctx context.Context, containerID string, condit // legacyContainerWait returns immediately and doesn't have an option to wait // until the container is removed. 
-func (cli *Client) legacyContainerWait(ctx context.Context, containerID string) (<-chan container.ContainerWaitOKBody, <-chan error) { - resultC := make(chan container.ContainerWaitOKBody) +func (cli *Client) legacyContainerWait(ctx context.Context, containerID string) (<-chan container.WaitResponse, <-chan error) { + resultC := make(chan container.WaitResponse) errC := make(chan error) go func() { @@ -70,7 +91,7 @@ func (cli *Client) legacyContainerWait(ctx context.Context, containerID string) } defer ensureReaderClosed(resp) - var res container.ContainerWaitOKBody + var res container.WaitResponse if err := json.NewDecoder(resp.body).Decode(&res); err != nil { errC <- err return diff --git a/vendor/github.com/docker/docker/client/disk_usage.go b/vendor/github.com/docker/docker/client/disk_usage.go index 354cd36939..ba0d92e9e6 100644 --- a/vendor/github.com/docker/docker/client/disk_usage.go +++ b/vendor/github.com/docker/docker/client/disk_usage.go @@ -4,23 +4,30 @@ import ( "context" "encoding/json" "fmt" + "net/url" "github.com/docker/docker/api/types" ) // DiskUsage requests the current data usage from the daemon -func (cli *Client) DiskUsage(ctx context.Context) (types.DiskUsage, error) { - var du types.DiskUsage +func (cli *Client) DiskUsage(ctx context.Context, options types.DiskUsageOptions) (types.DiskUsage, error) { + var query url.Values + if len(options.Types) > 0 { + query = url.Values{} + for _, t := range options.Types { + query.Add("type", string(t)) + } + } - serverResp, err := cli.get(ctx, "/system/df", nil, nil) + serverResp, err := cli.get(ctx, "/system/df", query, nil) defer ensureReaderClosed(serverResp) if err != nil { - return du, err + return types.DiskUsage{}, err } + var du types.DiskUsage if err := json.NewDecoder(serverResp.body).Decode(&du); err != nil { - return du, fmt.Errorf("Error retrieving disk usage: %v", err) + return types.DiskUsage{}, fmt.Errorf("Error retrieving disk usage: %v", err) } - return du, nil } diff --git a/vendor/github.com/docker/docker/client/distribution_inspect.go b/vendor/github.com/docker/docker/client/distribution_inspect.go index f4e3794cb4..7f36c99a01 100644 --- a/vendor/github.com/docker/docker/client/distribution_inspect.go +++ b/vendor/github.com/docker/docker/client/distribution_inspect.go @@ -8,7 +8,7 @@ import ( registrytypes "github.com/docker/docker/api/types/registry" ) -// DistributionInspect returns the image digest with full Manifest +// DistributionInspect returns the image digest with the full manifest. func (cli *Client) DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registrytypes.DistributionInspect, error) { // Contact the registry to retrieve digest and platform information var distributionInspect registrytypes.DistributionInspect diff --git a/vendor/github.com/docker/docker/client/envvars.go b/vendor/github.com/docker/docker/client/envvars.go new file mode 100644 index 0000000000..61dd45c1d7 --- /dev/null +++ b/vendor/github.com/docker/docker/client/envvars.go @@ -0,0 +1,90 @@ +package client // import "github.com/docker/docker/client" + +const ( + // EnvOverrideHost is the name of the environment variable that can be used + // to override the default host to connect to (DefaultDockerHost). 
+ // + // This env-var is read by FromEnv and WithHostFromEnv and when set to a + // non-empty value, takes precedence over the default host (which is platform + // specific), or any host already set. + EnvOverrideHost = "DOCKER_HOST" + + // EnvOverrideAPIVersion is the name of the environment variable that can + // be used to override the API version to use. Value should be + // formatted as MAJOR.MINOR, for example, "1.19". + // + // This env-var is read by FromEnv and WithVersionFromEnv and when set to a + // non-empty value, takes precedence over API version negotiation. + // + // This environment variable should be used for debugging purposes only, as + // it can set the client to use an incompatible (or invalid) API version. + EnvOverrideAPIVersion = "DOCKER_API_VERSION" + + // EnvOverrideCertPath is the name of the environment variable that can be + // used to specify the directory from which to load the TLS certificates + // (ca.pem, cert.pem, key.pem) from. These certificates are used to configure + // the Client for a TCP connection protected by TLS client authentication. + // + // TLS certificate verification is enabled by default if the Client is configured + // to use a TLS connection. Refer to EnvTLSVerify below to learn how to + // disable verification for testing purposes. + // + // WARNING: Access to the remote API is equivalent to root access to the + // host where the daemon runs. Do not expose the API without protection, + // and only if needed. Make sure you are familiar with the "daemon attack + // surface" (https://docs.docker.com/go/attack-surface/). + // + // For local access to the API, it is recommended to connect with the daemon + // using the default local socket connection (on Linux), or the named pipe + // (on Windows). + // + // If you need to access the API of a remote daemon, consider using an SSH + // (ssh://) connection, which is easier to set up, and requires no additional + // configuration if the host is accessible using ssh. + // + // If you cannot use the alternatives above, and you must expose the API over + // a TCP connection, refer to https://docs.docker.com/engine/security/protect-access/ + // to learn how to configure the daemon and client to use a TCP connection + // with TLS client authentication. Make sure you know the differences between + // a regular TLS connection and a TLS connection protected by TLS client + // authentication, and verify that the API cannot be accessed by other clients. + EnvOverrideCertPath = "DOCKER_CERT_PATH" + + // EnvTLSVerify is the name of the environment variable that can be used to + // enable or disable TLS certificate verification. When set to a non-empty + // value, TLS certificate verification is enabled, and the client is configured + // to use a TLS connection, using certificates from the default directories + // (within `~/.docker`); refer to EnvOverrideCertPath above for additional + // details. + // + // WARNING: Access to the remote API is equivalent to root access to the + // host where the daemon runs. Do not expose the API without protection, + // and only if needed. Make sure you are familiar with the "daemon attack + // surface" (https://docs.docker.com/go/attack-surface/). + // + // Before setting up your client and daemon to use a TCP connection with TLS + // client authentication, consider using one of the alternatives mentioned + // in EnvOverrideCertPath above. 
+ // + // Disabling TLS certificate verification (for testing purposes) + // + // TLS certificate verification is enabled by default if the Client is configured + // to use a TLS connection, and it is highly recommended to keep verification + // enabled to prevent machine-in-the-middle attacks. Refer to the documentation + // at https://docs.docker.com/engine/security/protect-access/ and pages linked + // from that page to learn how to configure the daemon and client to use a + // TCP connection with TLS client authentication enabled. + // + // Set the "DOCKER_TLS_VERIFY" environment to an empty string ("") to + // disable TLS certificate verification. Disabling verification is insecure, + // so should only be done for testing purposes. From the Go documentation + // (https://pkg.go.dev/crypto/tls#Config): + // + // InsecureSkipVerify controls whether a client verifies the server's + // certificate chain and host name. If InsecureSkipVerify is true, crypto/tls + // accepts any certificate presented by the server and any host name in that + // certificate. In this mode, TLS is susceptible to machine-in-the-middle + // attacks unless custom verification is used. This should be used only for + // testing or in combination with VerifyConnection or VerifyPeerCertificate. + EnvTLSVerify = "DOCKER_TLS_VERIFY" +) diff --git a/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/docker/docker/client/errors.go index 041bc8d49c..e5a8a865f9 100644 --- a/vendor/github.com/docker/docker/client/errors.go +++ b/vendor/github.com/docker/docker/client/errors.go @@ -2,7 +2,6 @@ package client // import "github.com/docker/docker/client" import ( "fmt" - "net/http" "github.com/docker/docker/api/types/versions" "github.com/docker/docker/errdefs" @@ -41,11 +40,11 @@ type notFound interface { // IsErrNotFound returns true if the error is a NotFound error, which is returned // by the API when some object is not found. func IsErrNotFound(err error) bool { - var e notFound - if errors.As(err, &e) { + if errdefs.IsNotFound(err) { return true } - return errdefs.IsNotFound(err) + var e notFound + return errors.As(err, &e) } type objectNotFoundError struct { @@ -59,35 +58,11 @@ func (e objectNotFoundError) Error() string { return fmt.Sprintf("Error: No such %s: %s", e.object, e.id) } -func wrapResponseError(err error, resp serverResponse, object, id string) error { - switch { - case err == nil: - return nil - case resp.statusCode == http.StatusNotFound: - return objectNotFoundError{object: object, id: id} - case resp.statusCode == http.StatusNotImplemented: - return errdefs.NotImplemented(err) - default: - return err - } -} - -// unauthorizedError represents an authorization error in a remote registry. 
-type unauthorizedError struct { - cause error -} - -// Error returns a string representation of an unauthorizedError -func (u unauthorizedError) Error() string { - return u.cause.Error() -} - // IsErrUnauthorized returns true if the error is caused // when a remote registry authentication fails +// +// Deprecated: use errdefs.IsUnauthorized func IsErrUnauthorized(err error) bool { - if _, ok := err.(unauthorizedError); ok { - return ok - } return errdefs.IsUnauthorized(err) } @@ -99,32 +74,12 @@ func (e pluginPermissionDenied) Error() string { return "Permission denied while installing plugin " + e.name } -// IsErrPluginPermissionDenied returns true if the error is caused -// when a user denies a plugin's permissions -func IsErrPluginPermissionDenied(err error) bool { - _, ok := err.(pluginPermissionDenied) - return ok -} - -type notImplementedError struct { - message string -} - -func (e notImplementedError) Error() string { - return e.message -} - -func (e notImplementedError) NotImplemented() bool { - return true -} - // IsErrNotImplemented returns true if the error is a NotImplemented error. // This is returned by the API when a requested feature has not been // implemented. +// +// Deprecated: use errdefs.IsNotImplemented func IsErrNotImplemented(err error) bool { - if _, ok := err.(notImplementedError); ok { - return ok - } return errdefs.IsNotImplemented(err) } diff --git a/vendor/github.com/docker/docker/client/events.go b/vendor/github.com/docker/docker/client/events.go index f0dc9d9e12..a9c48a9288 100644 --- a/vendor/github.com/docker/docker/client/events.go +++ b/vendor/github.com/docker/docker/client/events.go @@ -17,7 +17,6 @@ import ( // be sent over the error channel. If an error is sent all processing will be stopped. It's up // to the caller to reopen the stream in the event of an error by reinvoking this method. func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) { - messages := make(chan events.Message) errs := make(chan error, 1) diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go index e1dc49ef0f..6bdacab10a 100644 --- a/vendor/github.com/docker/docker/client/hijack.go +++ b/vendor/github.com/docker/docker/client/hijack.go @@ -12,6 +12,7 @@ import ( "time" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" "github.com/docker/go-connections/sockets" "github.com/pkg/errors" ) @@ -30,12 +31,12 @@ func (cli *Client) postHijacked(ctx context.Context, path string, query url.Valu } req = cli.addHeaders(req, headers) - conn, err := cli.setupHijackConn(ctx, req, "tcp") + conn, mediaType, err := cli.setupHijackConn(ctx, req, "tcp") if err != nil { return types.HijackedResponse{}, err } - return types.HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn)}, err + return types.NewHijackedResponse(conn, mediaType), err } // DialHijack returns a hijacked connection with negotiated protocol proto. @@ -46,7 +47,8 @@ func (cli *Client) DialHijack(ctx context.Context, url, proto string, meta map[s } req = cli.addHeaders(req, meta) - return cli.setupHijackConn(ctx, req, proto) + conn, _, err := cli.setupHijackConn(ctx, req, proto) + return conn, err } // fallbackDial is used when WithDialer() was not called. 
@@ -61,7 +63,7 @@ func fallbackDial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { return net.Dial(proto, addr) } -func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto string) (net.Conn, error) { +func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto string) (net.Conn, string, error) { req.Host = cli.addr req.Header.Set("Connection", "Upgrade") req.Header.Set("Upgrade", proto) @@ -69,7 +71,7 @@ func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto dialer := cli.Dialer() conn, err := dialer(ctx) if err != nil { - return nil, errors.Wrap(err, "cannot connect to the Docker daemon. Is 'docker daemon' running on this host?") + return nil, "", errors.Wrap(err, "cannot connect to the Docker daemon. Is 'docker daemon' running on this host?") } // When we set up a TCP connection for hijack, there could be long periods @@ -91,18 +93,18 @@ func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto //nolint:staticcheck // ignore SA1019 for connecting to old (pre go1.8) daemons if err != httputil.ErrPersistEOF { if err != nil { - return nil, err + return nil, "", err } if resp.StatusCode != http.StatusSwitchingProtocols { resp.Body.Close() - return nil, fmt.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode) + return nil, "", fmt.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode) } } c, br := clientconn.Hijack() if br.Buffered() > 0 { // If there is buffered content, wrap the connection. We return an - // object that implements CloseWrite iff the underlying connection + // object that implements CloseWrite if the underlying connection // implements it. if _, ok := c.(types.CloseWriter); ok { c = &hijackedConnCloseWriter{&hijackedConn{c, br}} @@ -113,7 +115,13 @@ func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto br.Reset(nil) } - return c, nil + var mediaType string + if versions.GreaterThanOrEqualTo(cli.ClientVersion(), "1.42") { + // Prior to 1.42, Content-Type is always set to raw-stream and not relevant + mediaType = resp.Header.Get("Content-Type") + } + + return c, mediaType, nil } // hijackedConn wraps a net.Conn and is returned by setupHijackConn in the case diff --git a/vendor/github.com/docker/docker/client/image_build.go b/vendor/github.com/docker/docker/client/image_build.go index 8fcf995036..d16e1d8ea9 100644 --- a/vendor/github.com/docker/docker/client/image_build.go +++ b/vendor/github.com/docker/docker/client/image_build.go @@ -14,8 +14,8 @@ import ( "github.com/docker/docker/api/types/container" ) -// ImageBuild sends request to the daemon to build images. -// The Body in the response implement an io.ReadCloser and it's up to the caller to +// ImageBuild sends a request to the daemon to build images. +// The Body in the response implements an io.ReadCloser and it's up to the caller to // close it. 
func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) { query, err := cli.imageBuildOptionsToQuery(options) diff --git a/vendor/github.com/docker/docker/client/image_create.go b/vendor/github.com/docker/docker/client/image_create.go index 239380474e..b1c0227775 100644 --- a/vendor/github.com/docker/docker/client/image_create.go +++ b/vendor/github.com/docker/docker/client/image_create.go @@ -10,7 +10,7 @@ import ( "github.com/docker/docker/api/types" ) -// ImageCreate creates a new image based in the parent options. +// ImageCreate creates a new image based on the parent options. // It returns the JSON content in the response body. func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) { ref, err := reference.ParseNormalizedNamed(parentReference) diff --git a/vendor/github.com/docker/docker/client/image_import.go b/vendor/github.com/docker/docker/client/image_import.go index d3336d4106..c5de42cb79 100644 --- a/vendor/github.com/docker/docker/client/image_import.go +++ b/vendor/github.com/docker/docker/client/image_import.go @@ -10,7 +10,7 @@ import ( "github.com/docker/docker/api/types" ) -// ImageImport creates a new image based in the source options. +// ImageImport creates a new image based on the source options. // It returns the JSON content in the response body. func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) { if ref != "" { diff --git a/vendor/github.com/docker/docker/client/image_inspect.go b/vendor/github.com/docker/docker/client/image_inspect.go index 1eb8dce025..1de10e5a08 100644 --- a/vendor/github.com/docker/docker/client/image_inspect.go +++ b/vendor/github.com/docker/docker/client/image_inspect.go @@ -4,7 +4,7 @@ import ( "bytes" "context" "encoding/json" - "io/ioutil" + "io" "github.com/docker/docker/api/types" ) @@ -17,10 +17,10 @@ func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (typ serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil) defer ensureReaderClosed(serverResp) if err != nil { - return types.ImageInspect{}, nil, wrapResponseError(err, serverResp, "image", imageID) + return types.ImageInspect{}, nil, err } - body, err := ioutil.ReadAll(serverResp.body) + body, err := io.ReadAll(serverResp.body) if err != nil { return types.ImageInspect{}, nil, err } diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go index a4d7505094..950d513334 100644 --- a/vendor/github.com/docker/docker/client/image_list.go +++ b/vendor/github.com/docker/docker/client/image_list.go @@ -34,6 +34,9 @@ func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions if options.All { query.Set("all", "1") } + if options.SharedSize && versions.GreaterThanOrEqualTo(cli.version, "1.42") { + query.Set("shared-size", "1") + } serverResp, err := cli.get(ctx, "/images/json", query, nil) defer ensureReaderClosed(serverResp) diff --git a/vendor/github.com/docker/docker/client/image_remove.go b/vendor/github.com/docker/docker/client/image_remove.go index 84a41af0f2..6a9fb3f41f 100644 --- 
a/vendor/github.com/docker/docker/client/image_remove.go +++ b/vendor/github.com/docker/docker/client/image_remove.go @@ -23,7 +23,7 @@ func (cli *Client) ImageRemove(ctx context.Context, imageID string, options type resp, err := cli.delete(ctx, "/images/"+imageID, query, nil) defer ensureReaderClosed(resp) if err != nil { - return dels, wrapResponseError(err, resp, "image", imageID) + return dels, err } err = json.NewDecoder(resp.body).Decode(&dels) diff --git a/vendor/github.com/docker/docker/client/image_search.go b/vendor/github.com/docker/docker/client/image_search.go index 82955a7477..e69fa37225 100644 --- a/vendor/github.com/docker/docker/client/image_search.go +++ b/vendor/github.com/docker/docker/client/image_search.go @@ -3,8 +3,8 @@ package client // import "github.com/docker/docker/client" import ( "context" "encoding/json" - "fmt" "net/url" + "strconv" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" @@ -12,13 +12,15 @@ import ( "github.com/docker/docker/errdefs" ) -// ImageSearch makes the docker host to search by a term in a remote registry. +// ImageSearch makes the docker host search by a term in a remote registry. // The list of results is not sorted in any fashion. func (cli *Client) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) { var results []registry.SearchResult query := url.Values{} query.Set("term", term) - query.Set("limit", fmt.Sprintf("%d", options.Limit)) + if options.Limit > 0 { + query.Set("limit", strconv.Itoa(options.Limit)) + } if options.Filters.Len() > 0 { filterJSON, err := filters.ToJSON(options.Filters) diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go index aabad4a911..e9c1ed722e 100644 --- a/vendor/github.com/docker/docker/client/interface.go +++ b/vendor/github.com/docker/docker/client/interface.go @@ -5,17 +5,16 @@ import ( "io" "net" "net/http" - "time" "github.com/docker/docker/api/types" - containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/image" - networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/swarm" - volumetypes "github.com/docker/docker/api/types/volume" + "github.com/docker/docker/api/types/volume" specs "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -48,8 +47,8 @@ type CommonAPIClient interface { type ContainerAPIClient interface { ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) - ContainerCreate(ctx context.Context, config *containertypes.Config, hostConfig *containertypes.HostConfig, networkingConfig *networktypes.NetworkingConfig, platform *specs.Platform, containerName string) (containertypes.ContainerCreateCreatedBody, error) - ContainerDiff(ctx context.Context, container string) ([]containertypes.ContainerChangeResponseItem, error) + 
ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *specs.Platform, containerName string) (container.CreateResponse, error) + ContainerDiff(ctx context.Context, container string) ([]container.ContainerChangeResponseItem, error) ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) @@ -65,16 +64,16 @@ type ContainerAPIClient interface { ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error ContainerRename(ctx context.Context, container, newContainerName string) error ContainerResize(ctx context.Context, container string, options types.ResizeOptions) error - ContainerRestart(ctx context.Context, container string, timeout *time.Duration) error + ContainerRestart(ctx context.Context, container string, options container.StopOptions) error ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error) ContainerStats(ctx context.Context, container string, stream bool) (types.ContainerStats, error) ContainerStatsOneShot(ctx context.Context, container string) (types.ContainerStats, error) ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error - ContainerStop(ctx context.Context, container string, timeout *time.Duration) error - ContainerTop(ctx context.Context, container string, arguments []string) (containertypes.ContainerTopOKBody, error) + ContainerStop(ctx context.Context, container string, options container.StopOptions) error + ContainerTop(ctx context.Context, container string, arguments []string) (container.ContainerTopOKBody, error) ContainerUnpause(ctx context.Context, container string) error - ContainerUpdate(ctx context.Context, container string, updateConfig containertypes.UpdateConfig) (containertypes.ContainerUpdateOKBody, error) - ContainerWait(ctx context.Context, container string, condition containertypes.WaitCondition) (<-chan containertypes.ContainerWaitOKBody, <-chan error) + ContainerUpdate(ctx context.Context, container string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) + ContainerWait(ctx context.Context, container string, condition container.WaitCondition) (<-chan container.WaitResponse, <-chan error) CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) @@ -107,7 +106,7 @@ type ImageAPIClient interface { // NetworkAPIClient defines API client methods for the networks type NetworkAPIClient interface { - NetworkConnect(ctx context.Context, network, container string, config *networktypes.EndpointSettings) error + NetworkConnect(ctx context.Context, network, container string, config *network.EndpointSettings) error NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) NetworkDisconnect(ctx context.Context, network, container string, force bool) error NetworkInspect(ctx context.Context, network string, options 
types.NetworkInspectOptions) (types.NetworkResource, error) @@ -168,18 +167,19 @@ type SystemAPIClient interface { Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) Info(ctx context.Context) (types.Info, error) RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) - DiskUsage(ctx context.Context) (types.DiskUsage, error) + DiskUsage(ctx context.Context, options types.DiskUsageOptions) (types.DiskUsage, error) Ping(ctx context.Context) (types.Ping, error) } // VolumeAPIClient defines API client methods for the volumes type VolumeAPIClient interface { - VolumeCreate(ctx context.Context, options volumetypes.VolumeCreateBody) (types.Volume, error) - VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) - VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) - VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumeListOKBody, error) + VolumeCreate(ctx context.Context, options volume.CreateOptions) (volume.Volume, error) + VolumeInspect(ctx context.Context, volumeID string) (volume.Volume, error) + VolumeInspectWithRaw(ctx context.Context, volumeID string) (volume.Volume, []byte, error) + VolumeList(ctx context.Context, filter filters.Args) (volume.ListResponse, error) VolumeRemove(ctx context.Context, volumeID string, force bool) error VolumesPrune(ctx context.Context, pruneFilter filters.Args) (types.VolumesPruneReport, error) + VolumeUpdate(ctx context.Context, volumeID string, version swarm.Version, options volume.UpdateOptions) error } // SecretAPIClient defines API client methods for secrets diff --git a/vendor/github.com/docker/docker/client/network_inspect.go b/vendor/github.com/docker/docker/client/network_inspect.go index 89a05b3021..0f90e2bb90 100644 --- a/vendor/github.com/docker/docker/client/network_inspect.go +++ b/vendor/github.com/docker/docker/client/network_inspect.go @@ -4,7 +4,7 @@ import ( "bytes" "context" "encoding/json" - "io/ioutil" + "io" "net/url" "github.com/docker/docker/api/types" @@ -36,10 +36,10 @@ func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, resp, err = cli.get(ctx, "/networks/"+networkID, query, nil) defer ensureReaderClosed(resp) if err != nil { - return networkResource, nil, wrapResponseError(err, resp, "network", networkID) + return networkResource, nil, err } - body, err := ioutil.ReadAll(resp.body) + body, err := io.ReadAll(resp.body) if err != nil { return networkResource, nil, err } diff --git a/vendor/github.com/docker/docker/client/network_remove.go b/vendor/github.com/docker/docker/client/network_remove.go index e71b16d869..9d6c6cef07 100644 --- a/vendor/github.com/docker/docker/client/network_remove.go +++ b/vendor/github.com/docker/docker/client/network_remove.go @@ -6,5 +6,5 @@ import "context" func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error { resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil) defer ensureReaderClosed(resp) - return wrapResponseError(err, resp, "network", networkID) + return err } diff --git a/vendor/github.com/docker/docker/client/node_inspect.go b/vendor/github.com/docker/docker/client/node_inspect.go index d296c9fdde..95ab9b1be0 100644 --- a/vendor/github.com/docker/docker/client/node_inspect.go +++ b/vendor/github.com/docker/docker/client/node_inspect.go @@ -4,7 +4,7 @@ import ( "bytes" 
"context" "encoding/json" - "io/ioutil" + "io" "github.com/docker/docker/api/types/swarm" ) @@ -17,10 +17,10 @@ func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil) defer ensureReaderClosed(serverResp) if err != nil { - return swarm.Node{}, nil, wrapResponseError(err, serverResp, "node", nodeID) + return swarm.Node{}, nil, err } - body, err := ioutil.ReadAll(serverResp.body) + body, err := io.ReadAll(serverResp.body) if err != nil { return swarm.Node{}, nil, err } diff --git a/vendor/github.com/docker/docker/client/node_remove.go b/vendor/github.com/docker/docker/client/node_remove.go index 03ab878097..e44436debc 100644 --- a/vendor/github.com/docker/docker/client/node_remove.go +++ b/vendor/github.com/docker/docker/client/node_remove.go @@ -16,5 +16,5 @@ func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types. resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil) defer ensureReaderClosed(resp) - return wrapResponseError(err, resp, "node", nodeID) + return err } diff --git a/vendor/github.com/docker/docker/client/node_update.go b/vendor/github.com/docker/docker/client/node_update.go index de32a617fb..0d0fc3b788 100644 --- a/vendor/github.com/docker/docker/client/node_update.go +++ b/vendor/github.com/docker/docker/client/node_update.go @@ -3,7 +3,6 @@ package client // import "github.com/docker/docker/client" import ( "context" "net/url" - "strconv" "github.com/docker/docker/api/types/swarm" ) @@ -11,7 +10,7 @@ import ( // NodeUpdate updates a Node. func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error { query := url.Values{} - query.Set("version", strconv.FormatUint(version.Index, 10)) + query.Set("version", version.String()) resp, err := cli.post(ctx, "/nodes/"+nodeID+"/update", query, node, nil) ensureReaderClosed(resp) return err diff --git a/vendor/github.com/docker/docker/client/options.go b/vendor/github.com/docker/docker/client/options.go index 6f77f0955f..099ad41846 100644 --- a/vendor/github.com/docker/docker/client/options.go +++ b/vendor/github.com/docker/docker/client/options.go @@ -18,51 +18,32 @@ type Opt func(*Client) error // FromEnv configures the client with values from environment variables. // -// Supported environment variables: -// DOCKER_HOST to set the url to the docker server. -// DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest. -// DOCKER_CERT_PATH to load the TLS certificates from. -// DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default. +// FromEnv uses the following environment variables: +// +// DOCKER_HOST (EnvOverrideHost) to set the URL to the docker server. +// +// DOCKER_API_VERSION (EnvOverrideAPIVersion) to set the version of the API to +// use, leave empty for latest. +// +// DOCKER_CERT_PATH (EnvOverrideCertPath) to specify the directory from which to +// load the TLS certificates (ca.pem, cert.pem, key.pem). +// +// DOCKER_TLS_VERIFY (EnvTLSVerify) to enable or disable TLS verification (off by +// default). 
func FromEnv(c *Client) error { - if dockerCertPath := os.Getenv("DOCKER_CERT_PATH"); dockerCertPath != "" { - options := tlsconfig.Options{ - CAFile: filepath.Join(dockerCertPath, "ca.pem"), - CertFile: filepath.Join(dockerCertPath, "cert.pem"), - KeyFile: filepath.Join(dockerCertPath, "key.pem"), - InsecureSkipVerify: os.Getenv("DOCKER_TLS_VERIFY") == "", - } - tlsc, err := tlsconfig.Client(options) - if err != nil { - return err - } - - c.client = &http.Client{ - Transport: &http.Transport{TLSClientConfig: tlsc}, - CheckRedirect: CheckRedirect, - } - } - - if host := os.Getenv("DOCKER_HOST"); host != "" { - if err := WithHost(host)(c); err != nil { - return err - } + ops := []Opt{ + WithTLSClientConfigFromEnv(), + WithHostFromEnv(), + WithVersionFromEnv(), } - - if version := os.Getenv("DOCKER_API_VERSION"); version != "" { - if err := WithVersion(version)(c); err != nil { + for _, op := range ops { + if err := op(c); err != nil { return err } } return nil } -// WithDialer applies the dialer.DialContext to the client transport. This can be -// used to set the Timeout and KeepAlive settings of the client. -// Deprecated: use WithDialContext -func WithDialer(dialer *net.Dialer) Opt { - return WithDialContext(dialer.DialContext) -} - // WithDialContext applies the dialer to the client transport. This can be // used to set the Timeout and KeepAlive settings of the client. func WithDialContext(dialContext func(ctx context.Context, network, addr string) (net.Conn, error)) Opt { @@ -93,6 +74,18 @@ func WithHost(host string) Opt { } } +// WithHostFromEnv overrides the client host with the host specified in the +// DOCKER_HOST (EnvOverrideHost) environment variable. If DOCKER_HOST is not set, +// or set to an empty value, the host is not modified. +func WithHostFromEnv() Opt { + return func(c *Client) error { + if host := os.Getenv(EnvOverrideHost); host != "" { + return WithHost(host)(c) + } + return nil + } +} + // WithHTTPClient overrides the client http client with the specified one func WithHTTPClient(client *http.Client) Opt { return func(c *Client) error { @@ -148,6 +141,42 @@ func WithTLSClientConfig(cacertPath, certPath, keyPath string) Opt { } } +// WithTLSClientConfigFromEnv configures the client's TLS settings with the +// settings in the DOCKER_CERT_PATH and DOCKER_TLS_VERIFY environment variables. +// If DOCKER_CERT_PATH is not set or empty, TLS configuration is not modified. +// +// WithTLSClientConfigFromEnv uses the following environment variables: +// +// DOCKER_CERT_PATH (EnvOverrideCertPath) to specify the directory from which to +// load the TLS certificates (ca.pem, cert.pem, key.pem). +// +// DOCKER_TLS_VERIFY (EnvTLSVerify) to enable or disable TLS verification (off by +// default). +func WithTLSClientConfigFromEnv() Opt { + return func(c *Client) error { + dockerCertPath := os.Getenv(EnvOverrideCertPath) + if dockerCertPath == "" { + return nil + } + options := tlsconfig.Options{ + CAFile: filepath.Join(dockerCertPath, "ca.pem"), + CertFile: filepath.Join(dockerCertPath, "cert.pem"), + KeyFile: filepath.Join(dockerCertPath, "key.pem"), + InsecureSkipVerify: os.Getenv(EnvTLSVerify) == "", + } + tlsc, err := tlsconfig.Client(options) + if err != nil { + return err + } + + c.client = &http.Client{ + Transport: &http.Transport{TLSClientConfig: tlsc}, + CheckRedirect: CheckRedirect, + } + return nil + } +} + // WithVersion overrides the client version with the specified one. If an empty // version is specified, the value will be ignored to allow version negotiation. 
func WithVersion(version string) Opt { @@ -160,6 +189,15 @@ func WithVersion(version string) Opt { } } +// WithVersionFromEnv overrides the client version with the version specified in +// the DOCKER_API_VERSION environment variable. If DOCKER_API_VERSION is not set, +// the version is not modified. +func WithVersionFromEnv() Opt { + return func(c *Client) error { + return WithVersion(os.Getenv(EnvOverrideAPIVersion))(c) + } +} + // WithAPIVersionNegotiation enables automatic API version negotiation for the client. // With this option enabled, the client automatically negotiates the API version // to use when making requests. API version negotiation is performed on the first diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go index a9af001ef4..27e8695cb5 100644 --- a/vendor/github.com/docker/docker/client/ping.go +++ b/vendor/github.com/docker/docker/client/ping.go @@ -4,8 +4,10 @@ import ( "context" "net/http" "path" + "strings" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/errdefs" ) @@ -61,6 +63,13 @@ func parsePingResponse(cli *Client, resp serverResponse) (types.Ping, error) { if bv := resp.header.Get("Builder-Version"); bv != "" { ping.BuilderVersion = types.BuilderVersion(bv) } + if si := resp.header.Get("Swarm"); si != "" { + parts := strings.SplitN(si, "/", 2) + ping.SwarmStatus = &swarm.Status{ + NodeState: swarm.LocalNodeState(parts[0]), + ControlAvailable: len(parts) == 2 && parts[1] == "manager", + } + } err := cli.checkResponseErr(resp) return ping, errdefs.FromStatusCode(err, resp.statusCode) } diff --git a/vendor/github.com/docker/docker/client/plugin_inspect.go b/vendor/github.com/docker/docker/client/plugin_inspect.go index 81b89732b0..f09e460660 100644 --- a/vendor/github.com/docker/docker/client/plugin_inspect.go +++ b/vendor/github.com/docker/docker/client/plugin_inspect.go @@ -4,7 +4,7 @@ import ( "bytes" "context" "encoding/json" - "io/ioutil" + "io" "github.com/docker/docker/api/types" ) @@ -17,10 +17,10 @@ func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*type resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil) defer ensureReaderClosed(resp) if err != nil { - return nil, nil, wrapResponseError(err, resp, "plugin", name) + return nil, nil, err } - body, err := ioutil.ReadAll(resp.body) + body, err := io.ReadAll(resp.body) if err != nil { return nil, nil, err } diff --git a/vendor/github.com/docker/docker/client/plugin_list.go b/vendor/github.com/docker/docker/client/plugin_list.go index cf1935e2f5..2091a054d6 100644 --- a/vendor/github.com/docker/docker/client/plugin_list.go +++ b/vendor/github.com/docker/docker/client/plugin_list.go @@ -25,7 +25,7 @@ func (cli *Client) PluginList(ctx context.Context, filter filters.Args) (types.P resp, err := cli.get(ctx, "/plugins", query, nil) defer ensureReaderClosed(resp) if err != nil { - return plugins, wrapResponseError(err, resp, "plugin", "") + return plugins, err } err = json.NewDecoder(resp.body).Decode(&plugins) diff --git a/vendor/github.com/docker/docker/client/plugin_remove.go b/vendor/github.com/docker/docker/client/plugin_remove.go index 51ca1040d6..4cd66958c3 100644 --- a/vendor/github.com/docker/docker/client/plugin_remove.go +++ b/vendor/github.com/docker/docker/client/plugin_remove.go @@ -16,5 +16,5 @@ 
func (cli *Client) PluginRemove(ctx context.Context, name string, options types. resp, err := cli.delete(ctx, "/plugins/"+name, query, nil) defer ensureReaderClosed(resp) - return wrapResponseError(err, resp, "plugin", name) + return err } diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go index 7f54b1dd80..c799095c12 100644 --- a/vendor/github.com/docker/docker/client/request.go +++ b/vendor/github.com/docker/docker/client/request.go @@ -6,7 +6,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net" "net/http" "net/url" @@ -50,6 +49,14 @@ func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, b return cli.sendRequest(ctx, http.MethodPost, path, query, body, headers) } +func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { + body, headers, err := encodeBody(obj, headers) + if err != nil { + return serverResponse{}, err + } + return cli.sendRequest(ctx, http.MethodPut, path, query, body, headers) +} + // putRaw sends an http request to the docker API using the method PUT. func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { return cli.sendRequest(ctx, http.MethodPut, path, query, body, headers) @@ -110,11 +117,16 @@ func (cli *Client) sendRequest(ctx context.Context, method, path string, query u if err != nil { return serverResponse{}, err } + resp, err := cli.doRequest(ctx, req) - if err != nil { - return resp, errdefs.FromStatusCode(err, resp.statusCode) + switch { + case errors.Is(err, context.Canceled): + return serverResponse{}, errdefs.Cancelled(err) + case errors.Is(err, context.DeadlineExceeded): + return serverResponse{}, errdefs.Deadline(err) + case err == nil: + err = cli.checkResponseErr(resp) } - err = cli.checkResponseErr(resp) return resp, errdefs.FromStatusCode(err, resp.statusCode) } @@ -129,7 +141,7 @@ func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResp } if cli.scheme == "https" && strings.Contains(err.Error(), "bad certificate") { - return serverResp, errors.Wrap(err, "The server probably has client authentication (--tlsverify) enabled. 
Please check your TLS client certification settings") + return serverResp, errors.Wrap(err, "the server probably has client authentication (--tlsverify) enabled; check your TLS client certification settings") } // Don't decorate context sentinel errors; users may be comparing to @@ -141,7 +153,7 @@ func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResp if nErr, ok := err.(*url.Error); ok { if nErr, ok := nErr.Err.(*net.OpError); ok { if os.IsPermission(nErr.Err) { - return serverResp, errors.Wrapf(err, "Got permission denied while trying to connect to the Docker daemon socket at %v", cli.host) + return serverResp, errors.Wrapf(err, "permission denied while trying to connect to the Docker daemon socket at %v", cli.host) } } } @@ -168,10 +180,10 @@ func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResp if strings.Contains(err.Error(), `open //./pipe/docker_engine`) { // Checks if client is running with elevated privileges if f, elevatedErr := os.Open("\\\\.\\PHYSICALDRIVE0"); elevatedErr == nil { - err = errors.Wrap(err, "In the default daemon configuration on Windows, the docker client must be run with elevated privileges to connect.") + err = errors.Wrap(err, "in the default daemon configuration on Windows, the docker client must be run with elevated privileges to connect") } else { f.Close() - err = errors.Wrap(err, "This error may indicate that the docker daemon is not running.") + err = errors.Wrap(err, "this error may indicate that the docker daemon is not running") } } @@ -199,7 +211,7 @@ func (cli *Client) checkResponseErr(serverResp serverResponse) error { R: serverResp.body, N: int64(bodyMax), } - body, err = ioutil.ReadAll(bodyR) + body, err = io.ReadAll(bodyR) if err != nil { return err } @@ -234,14 +246,14 @@ func (cli *Client) addHeaders(req *http.Request, headers headers) *http.Request // Add CLI Config's HTTP Headers BEFORE we set the Docker headers // then the user can't change OUR headers for k, v := range cli.customHTTPHeaders { - if versions.LessThan(cli.version, "1.25") && k == "User-Agent" { + if versions.LessThan(cli.version, "1.25") && http.CanonicalHeaderKey(k) == "User-Agent" { continue } req.Header.Set(k, v) } for k, v := range headers { - req.Header[k] = v + req.Header[http.CanonicalHeaderKey(k)] = v } return req } @@ -259,7 +271,7 @@ func encodeData(data interface{}) (*bytes.Buffer, error) { func ensureReaderClosed(response serverResponse) { if response.body != nil { // Drain up to 512 bytes and close the body to let the Transport reuse the connection - io.CopyN(ioutil.Discard, response.body, 512) + io.CopyN(io.Discard, response.body, 512) response.body.Close() } } diff --git a/vendor/github.com/docker/docker/client/secret_create.go b/vendor/github.com/docker/docker/client/secret_create.go index fd5b914136..c65d38a191 100644 --- a/vendor/github.com/docker/docker/client/secret_create.go +++ b/vendor/github.com/docker/docker/client/secret_create.go @@ -8,7 +8,7 @@ import ( "github.com/docker/docker/api/types/swarm" ) -// SecretCreate creates a new Secret. +// SecretCreate creates a new secret. 
func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) { var response types.SecretCreateResponse if err := cli.NewVersionError("1.25", "secret create"); err != nil { diff --git a/vendor/github.com/docker/docker/client/secret_inspect.go b/vendor/github.com/docker/docker/client/secret_inspect.go index d093916c9a..5906874b15 100644 --- a/vendor/github.com/docker/docker/client/secret_inspect.go +++ b/vendor/github.com/docker/docker/client/secret_inspect.go @@ -4,7 +4,7 @@ import ( "bytes" "context" "encoding/json" - "io/ioutil" + "io" "github.com/docker/docker/api/types/swarm" ) @@ -20,10 +20,10 @@ func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.S resp, err := cli.get(ctx, "/secrets/"+id, nil, nil) defer ensureReaderClosed(resp) if err != nil { - return swarm.Secret{}, nil, wrapResponseError(err, resp, "secret", id) + return swarm.Secret{}, nil, err } - body, err := ioutil.ReadAll(resp.body) + body, err := io.ReadAll(resp.body) if err != nil { return swarm.Secret{}, nil, err } diff --git a/vendor/github.com/docker/docker/client/secret_remove.go b/vendor/github.com/docker/docker/client/secret_remove.go index c16f555804..f47f68b6e0 100644 --- a/vendor/github.com/docker/docker/client/secret_remove.go +++ b/vendor/github.com/docker/docker/client/secret_remove.go @@ -2,12 +2,12 @@ package client // import "github.com/docker/docker/client" import "context" -// SecretRemove removes a Secret. +// SecretRemove removes a secret. func (cli *Client) SecretRemove(ctx context.Context, id string) error { if err := cli.NewVersionError("1.25", "secret remove"); err != nil { return err } resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil) defer ensureReaderClosed(resp) - return wrapResponseError(err, resp, "secret", id) + return err } diff --git a/vendor/github.com/docker/docker/client/secret_update.go b/vendor/github.com/docker/docker/client/secret_update.go index 164256bbc1..2e939e8ced 100644 --- a/vendor/github.com/docker/docker/client/secret_update.go +++ b/vendor/github.com/docker/docker/client/secret_update.go @@ -3,18 +3,17 @@ package client // import "github.com/docker/docker/client" import ( "context" "net/url" - "strconv" "github.com/docker/docker/api/types/swarm" ) -// SecretUpdate attempts to update a Secret +// SecretUpdate attempts to update a secret. 
func (cli *Client) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error { if err := cli.NewVersionError("1.25", "secret update"); err != nil { return err } query := url.Values{} - query.Set("version", strconv.FormatUint(version.Index, 10)) + query.Set("version", version.String()) resp, err := cli.post(ctx, "/secrets/"+id+"/update", query, secret, nil) ensureReaderClosed(resp) return err diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go index e0428bf98b..23024d0f8f 100644 --- a/vendor/github.com/docker/docker/client/service_create.go +++ b/vendor/github.com/docker/docker/client/service_create.go @@ -9,11 +9,11 @@ import ( "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" - digest "github.com/opencontainers/go-digest" + "github.com/opencontainers/go-digest" "github.com/pkg/errors" ) -// ServiceCreate creates a new Service. +// ServiceCreate creates a new service. func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) { var response types.ServiceCreateResponse headers := map[string][]string{ diff --git a/vendor/github.com/docker/docker/client/service_inspect.go b/vendor/github.com/docker/docker/client/service_inspect.go index 2801483b80..cee020c98b 100644 --- a/vendor/github.com/docker/docker/client/service_inspect.go +++ b/vendor/github.com/docker/docker/client/service_inspect.go @@ -5,7 +5,7 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" + "io" "net/url" "github.com/docker/docker/api/types" @@ -22,10 +22,10 @@ func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string, serverResp, err := cli.get(ctx, "/services/"+serviceID, query, nil) defer ensureReaderClosed(serverResp) if err != nil { - return swarm.Service{}, nil, wrapResponseError(err, serverResp, "service", serviceID) + return swarm.Service{}, nil, err } - body, err := ioutil.ReadAll(serverResp.body) + body, err := io.ReadAll(serverResp.body) if err != nil { return swarm.Service{}, nil, err } diff --git a/vendor/github.com/docker/docker/client/service_remove.go b/vendor/github.com/docker/docker/client/service_remove.go index 953a2adf5a..2c46326ebc 100644 --- a/vendor/github.com/docker/docker/client/service_remove.go +++ b/vendor/github.com/docker/docker/client/service_remove.go @@ -6,5 +6,5 @@ import "context" func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error { resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil) defer ensureReaderClosed(resp) - return wrapResponseError(err, resp, "service", serviceID) + return err } diff --git a/vendor/github.com/docker/docker/client/service_update.go b/vendor/github.com/docker/docker/client/service_update.go index c63895f74f..8014b86258 100644 --- a/vendor/github.com/docker/docker/client/service_update.go +++ b/vendor/github.com/docker/docker/client/service_update.go @@ -4,7 +4,6 @@ import ( "context" "encoding/json" "net/url" - "strconv" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" @@ -35,7 +34,7 @@ func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version query.Set("rollback", 
options.Rollback) } - query.Set("version", strconv.FormatUint(version.Index, 10)) + query.Set("version", version.String()) if err := validateServiceSpec(service); err != nil { return response, err diff --git a/vendor/github.com/docker/docker/client/swarm_update.go b/vendor/github.com/docker/docker/client/swarm_update.go index 56a5bea761..9fde7d75ee 100644 --- a/vendor/github.com/docker/docker/client/swarm_update.go +++ b/vendor/github.com/docker/docker/client/swarm_update.go @@ -2,7 +2,6 @@ package client // import "github.com/docker/docker/client" import ( "context" - "fmt" "net/url" "strconv" @@ -12,10 +11,10 @@ import ( // SwarmUpdate updates the swarm. func (cli *Client) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error { query := url.Values{} - query.Set("version", strconv.FormatUint(version.Index, 10)) - query.Set("rotateWorkerToken", fmt.Sprintf("%v", flags.RotateWorkerToken)) - query.Set("rotateManagerToken", fmt.Sprintf("%v", flags.RotateManagerToken)) - query.Set("rotateManagerUnlockKey", fmt.Sprintf("%v", flags.RotateManagerUnlockKey)) + query.Set("version", version.String()) + query.Set("rotateWorkerToken", strconv.FormatBool(flags.RotateWorkerToken)) + query.Set("rotateManagerToken", strconv.FormatBool(flags.RotateManagerToken)) + query.Set("rotateManagerUnlockKey", strconv.FormatBool(flags.RotateManagerUnlockKey)) resp, err := cli.post(ctx, "/swarm/update", query, swarm, nil) ensureReaderClosed(resp) return err diff --git a/vendor/github.com/docker/docker/client/task_inspect.go b/vendor/github.com/docker/docker/client/task_inspect.go index 44d40ba5ae..dde1f6c59d 100644 --- a/vendor/github.com/docker/docker/client/task_inspect.go +++ b/vendor/github.com/docker/docker/client/task_inspect.go @@ -4,12 +4,12 @@ import ( "bytes" "context" "encoding/json" - "io/ioutil" + "io" "github.com/docker/docker/api/types/swarm" ) -// TaskInspectWithRaw returns the task information and its raw representation.. +// TaskInspectWithRaw returns the task information and its raw representation. func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) { if taskID == "" { return swarm.Task{}, nil, objectNotFoundError{object: "task", id: taskID} @@ -17,10 +17,10 @@ func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil) defer ensureReaderClosed(serverResp) if err != nil { - return swarm.Task{}, nil, wrapResponseError(err, serverResp, "task", taskID) + return swarm.Task{}, nil, err } - body, err := ioutil.ReadAll(serverResp.body) + body, err := io.ReadAll(serverResp.body) if err != nil { return swarm.Task{}, nil, err } diff --git a/vendor/github.com/docker/docker/client/volume_create.go b/vendor/github.com/docker/docker/client/volume_create.go index 92761b3c63..b3b182437b 100644 --- a/vendor/github.com/docker/docker/client/volume_create.go +++ b/vendor/github.com/docker/docker/client/volume_create.go @@ -4,18 +4,17 @@ import ( "context" "encoding/json" - "github.com/docker/docker/api/types" - volumetypes "github.com/docker/docker/api/types/volume" + "github.com/docker/docker/api/types/volume" ) // VolumeCreate creates a volume in the docker host. 
-func (cli *Client) VolumeCreate(ctx context.Context, options volumetypes.VolumeCreateBody) (types.Volume, error) { - var volume types.Volume +func (cli *Client) VolumeCreate(ctx context.Context, options volume.CreateOptions) (volume.Volume, error) { + var vol volume.Volume resp, err := cli.post(ctx, "/volumes/create", nil, options, nil) defer ensureReaderClosed(resp) if err != nil { - return volume, err + return vol, err } - err = json.NewDecoder(resp.body).Decode(&volume) - return volume, err + err = json.NewDecoder(resp.body).Decode(&vol) + return vol, err } diff --git a/vendor/github.com/docker/docker/client/volume_inspect.go b/vendor/github.com/docker/docker/client/volume_inspect.go index e20b2c67c7..b3ba4e6046 100644 --- a/vendor/github.com/docker/docker/client/volume_inspect.go +++ b/vendor/github.com/docker/docker/client/volume_inspect.go @@ -4,35 +4,35 @@ import ( "bytes" "context" "encoding/json" - "io/ioutil" + "io" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/volume" ) // VolumeInspect returns the information about a specific volume in the docker host. -func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) { - volume, _, err := cli.VolumeInspectWithRaw(ctx, volumeID) - return volume, err +func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (volume.Volume, error) { + vol, _, err := cli.VolumeInspectWithRaw(ctx, volumeID) + return vol, err } // VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation -func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) { +func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (volume.Volume, []byte, error) { if volumeID == "" { - return types.Volume{}, nil, objectNotFoundError{object: "volume", id: volumeID} + return volume.Volume{}, nil, objectNotFoundError{object: "volume", id: volumeID} } - var volume types.Volume + var vol volume.Volume resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil) defer ensureReaderClosed(resp) if err != nil { - return volume, nil, wrapResponseError(err, resp, "volume", volumeID) + return vol, nil, err } - body, err := ioutil.ReadAll(resp.body) + body, err := io.ReadAll(resp.body) if err != nil { - return volume, nil, err + return vol, nil, err } rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&volume) - return volume, body, err + err = json.NewDecoder(rdr).Decode(&vol) + return vol, body, err } diff --git a/vendor/github.com/docker/docker/client/volume_list.go b/vendor/github.com/docker/docker/client/volume_list.go index 942498dde2..d8204f8db5 100644 --- a/vendor/github.com/docker/docker/client/volume_list.go +++ b/vendor/github.com/docker/docker/client/volume_list.go @@ -6,12 +6,12 @@ import ( "net/url" "github.com/docker/docker/api/types/filters" - volumetypes "github.com/docker/docker/api/types/volume" + "github.com/docker/docker/api/types/volume" ) // VolumeList returns the volumes configured in the docker host. 
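For orientation, not part of the vendored diff: with the move from types.Volume, VolumeCreateBody and VolumeListOKBody to the consolidated volume package, call sites migrate roughly as follows (canonical github.com/docker/docker import paths assumed).

import (
	"context"

	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/volume"
	"github.com/docker/docker/client"
)

func createAndList(ctx context.Context, cli *client.Client) ([]*volume.Volume, error) {
	// VolumeCreate now takes volume.CreateOptions and returns volume.Volume.
	if _, err := cli.VolumeCreate(ctx, volume.CreateOptions{Name: "mydata"}); err != nil {
		return nil, err
	}
	// VolumeList now returns volume.ListResponse instead of VolumeListOKBody.
	resp, err := cli.VolumeList(ctx, filters.NewArgs())
	if err != nil {
		return nil, err
	}
	return resp.Volumes, nil
}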
-func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumeListOKBody, error) { - var volumes volumetypes.VolumeListOKBody +func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volume.ListResponse, error) { + var volumes volume.ListResponse query := url.Values{} if filter.Len() > 0 { diff --git a/vendor/github.com/docker/docker/client/volume_remove.go b/vendor/github.com/docker/docker/client/volume_remove.go index 79decdafab..1f26438360 100644 --- a/vendor/github.com/docker/docker/client/volume_remove.go +++ b/vendor/github.com/docker/docker/client/volume_remove.go @@ -17,5 +17,5 @@ func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool } resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil) defer ensureReaderClosed(resp) - return wrapResponseError(err, resp, "volume", volumeID) + return err } diff --git a/vendor/github.com/docker/docker/client/volume_update.go b/vendor/github.com/docker/docker/client/volume_update.go new file mode 100644 index 0000000000..33bd31e531 --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_update.go @@ -0,0 +1,24 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/volume" +) + +// VolumeUpdate updates a volume. This only works for Cluster Volumes, and +// only some fields can be updated. +func (cli *Client) VolumeUpdate(ctx context.Context, volumeID string, version swarm.Version, options volume.UpdateOptions) error { + if err := cli.NewVersionError("1.42", "volume update"); err != nil { + return err + } + + query := url.Values{} + query.Set("version", version.String()) + + resp, err := cli.put(ctx, "/volumes/"+volumeID, query, options, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/errdefs/http_helpers.go b/vendor/github.com/docker/docker/errdefs/http_helpers.go index 5afe486779..77bda389d1 100644 --- a/vendor/github.com/docker/docker/errdefs/http_helpers.go +++ b/vendor/github.com/docker/docker/errdefs/http_helpers.go @@ -2,14 +2,12 @@ package errdefs // import "github.com/docker/docker/errdefs" import ( "net/http" - - "github.com/sirupsen/logrus" ) // FromStatusCode creates an errdef error, based on the provided HTTP status-code func FromStatusCode(err error, statusCode int) error { if err == nil { - return err + return nil } switch statusCode { case http.StatusNotFound: @@ -33,11 +31,6 @@ func FromStatusCode(err error, statusCode int) error { err = System(err) } default: - logrus.WithError(err).WithFields(logrus.Fields{ - "module": "api", - "status_code": statusCode, - }).Debug("FIXME: Got an status-code for which error does not match any expected type!!!") - switch { case statusCode >= 200 && statusCode < 400: // it's a client error diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go index 50b83c62c6..e9ac1e322e 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive.go @@ -7,9 +7,9 @@ import ( "compress/bzip2" "compress/gzip" "context" + "encoding/binary" "fmt" "io" - "io/ioutil" "os" "path/filepath" "runtime" @@ -18,15 +18,31 @@ import ( "syscall" "time" - 
"github.com/docker/docker/pkg/fileutils" + "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" + "github.com/klauspost/compress/zstd" + "github.com/moby/patternmatcher" + "github.com/moby/sys/sequential" + "github.com/pkg/errors" "github.com/sirupsen/logrus" exec "golang.org/x/sys/execabs" ) +// ImpliedDirectoryMode represents the mode (Unix permissions) applied to directories that are implied by files in a +// tar, but that do not have their own header entry. +// +// The permissions mask is stored in a constant instead of locally to ensure that magic numbers do not +// proliferate in the codebase. The default value 0755 has been selected based on the default umask of 0022, and +// a convention of mkdir(1) calling mkdir(2) with permissions of 0777, resulting in a final value of 0755. +// +// This value is currently implementation-defined, and not captured in any cross-runtime specification. Thus, it is +// subject to change in Moby at any time -- image authors who require consistent or known directory permissions +// should explicitly control them by ensuring that header entries exist for any applicable path. +const ImpliedDirectoryMode = 0755 + type ( // Compression is the state represents if compressed or not. Compression int @@ -39,8 +55,7 @@ type ( ExcludePatterns []string Compression Compression NoLchown bool - UIDMaps []idtools.IDMap - GIDMaps []idtools.IDMap + IDMap idtools.IdentityMapping ChownOpts *idtools.Identity IncludeSourceDir bool // WhiteoutFormat is the expected on disk format for whiteout files. @@ -62,12 +77,12 @@ type ( // mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. type Archiver struct { Untar func(io.Reader, string, *TarOptions) error - IDMapping *idtools.IdentityMapping + IDMapping idtools.IdentityMapping } // NewDefaultArchiver returns a new Archiver without any IdentityMapping func NewDefaultArchiver() *Archiver { - return &Archiver{Untar: Untar, IDMapping: &idtools.IdentityMapping{}} + return &Archiver{Untar: Untar} } // breakoutError is used to differentiate errors related to breaking out @@ -84,6 +99,8 @@ const ( Gzip // Xz is xz compression algorithm. Xz + // Zstd is zstd compression algorithm. + Zstd ) const ( @@ -122,18 +139,59 @@ func IsArchivePath(path string) bool { return err == nil } +const ( + zstdMagicSkippableStart = 0x184D2A50 + zstdMagicSkippableMask = 0xFFFFFFF0 +) + +var ( + bzip2Magic = []byte{0x42, 0x5A, 0x68} + gzipMagic = []byte{0x1F, 0x8B, 0x08} + xzMagic = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00} + zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} +) + +type matcher = func([]byte) bool + +func magicNumberMatcher(m []byte) matcher { + return func(source []byte) bool { + return bytes.HasPrefix(source, m) + } +} + +// zstdMatcher detects zstd compression algorithm. +// Zstandard compressed data is made of one or more frames. +// There are two frame formats defined by Zstandard: Zstandard frames and Skippable frames. +// See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details. 
+func zstdMatcher() matcher { + return func(source []byte) bool { + if bytes.HasPrefix(source, zstdMagic) { + // Zstandard frame + return true + } + // skippable frame + if len(source) < 8 { + return false + } + // magic number from 0x184D2A50 to 0x184D2A5F. + if binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart { + return true + } + return false + } +} + // DetectCompression detects the compression algorithm of the source. func DetectCompression(source []byte) Compression { - for compression, m := range map[Compression][]byte{ - Bzip2: {0x42, 0x5A, 0x68}, - Gzip: {0x1F, 0x8B, 0x08}, - Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, - } { - if len(source) < len(m) { - logrus.Debug("Len too short") - continue - } - if bytes.Equal(m, source[:len(m)]) { + compressionMap := map[Compression]matcher{ + Bzip2: magicNumberMatcher(bzip2Magic), + Gzip: magicNumberMatcher(gzipMagic), + Xz: magicNumberMatcher(xzMagic), + Zstd: zstdMatcher(), + } + for _, compression := range []Compression{Bzip2, Gzip, Xz, Zstd} { + fn := compressionMap[compression] + if fn(source) { return compression } } @@ -147,20 +205,15 @@ func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) } func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { - noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ") - var noPigz bool - - if noPigzEnv != "" { - var err error - noPigz, err = strconv.ParseBool(noPigzEnv) + if noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ"); noPigzEnv != "" { + noPigz, err := strconv.ParseBool(noPigzEnv) if err != nil { logrus.WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var") } - } - - if noPigz { - logrus.Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv) - return gzip.NewReader(buf) + if noPigz { + logrus.Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv) + return gzip.NewReader(buf) + } } unpigzPath, err := exec.LookPath("unpigz") @@ -225,6 +278,13 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) { } readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) return wrapReadCloser(readBufWrapper, cancel), nil + case Zstd: + zstdReader, err := zstd.NewReader(buf) + if err != nil { + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, zstdReader) + return readBufWrapper, nil default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } @@ -278,7 +338,9 @@ func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModi return nil } - header.Name = name + if header.Name == "" { + header.Name = name + } header.Size = int64(len(data)) if err := tarWriter.WriteHeader(header); err != nil { return err @@ -333,7 +395,6 @@ func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModi } pipeWriter.Close() - }() return pipeReader } @@ -349,16 +410,70 @@ func (compression *Compression) Extension() string { return "tar.gz" case Xz: return "tar.xz" + case Zstd: + return "tar.zst" } return "" } +// nosysFileInfo hides the system-dependent info of the wrapped FileInfo to +// prevent tar.FileInfoHeader from introspecting it and potentially calling into +// glibc. +type nosysFileInfo struct { + os.FileInfo +} + +func (fi nosysFileInfo) Sys() interface{} { + // A Sys value of type *tar.Header is safe as it is system-independent. + // The tar.FileInfoHeader function copies the fields into the returned + // header without performing any OS lookups. 
+ if sys, ok := fi.FileInfo.Sys().(*tar.Header); ok { + return sys + } + return nil +} + +// sysStat, if non-nil, populates hdr from system-dependent fields of fi. +var sysStat func(fi os.FileInfo, hdr *tar.Header) error + +// FileInfoHeaderNoLookups creates a partially-populated tar.Header from fi. +// +// Compared to the archive/tar.FileInfoHeader function, this function is safe to +// call from a chrooted process as it does not populate fields which would +// require operating system lookups. It behaves identically to +// tar.FileInfoHeader when fi is a FileInfo value returned from +// tar.Header.FileInfo(). +// +// When fi is a FileInfo for a native file, such as returned from os.Stat() and +// os.Lstat(), the returned Header value differs from one returned from +// tar.FileInfoHeader in the following ways. The Uname and Gname fields are not +// set as OS lookups would be required to populate them. The AccessTime and +// ChangeTime fields are not currently set (not yet implemented) although that +// is subject to change. Callers which require the AccessTime or ChangeTime +// fields to be zeroed should explicitly zero them out in the returned Header +// value to avoid any compatibility issues in the future. +func FileInfoHeaderNoLookups(fi os.FileInfo, link string) (*tar.Header, error) { + hdr, err := tar.FileInfoHeader(nosysFileInfo{fi}, link) + if err != nil { + return nil, err + } + if sysStat != nil { + return hdr, sysStat(fi, hdr) + } + return hdr, nil +} + // FileInfoHeader creates a populated Header from fi. -// Compared to archive pkg this function fills in more information. -// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), -// which have been deleted since Go 1.9 archive/tar. +// +// Compared to the archive/tar package, this function fills in less information +// but is safe to call from a chrooted process. The AccessTime and ChangeTime +// fields are not set in the returned header, ModTime is truncated to one-second +// precision, and the Uname and Gname fields are only set when fi is a FileInfo +// value returned from tar.Header.FileInfo(). Also, regardless of Go version, +// this function fills file type bits (e.g. hdr.Mode |= modeISDIR), which have +// been deleted since Go 1.9 archive/tar. 
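Illustrative only, not part of the vendored diff: the new lookup-free constructor can also be called directly where resolving user and group names is undesirable, for example from a chrooted archiving helper (assumes archive/tar, os and the docker pkg/archive imports).

func headerFor(path string) (*tar.Header, error) {
	fi, err := os.Lstat(path)
	if err != nil {
		return nil, err
	}
	// No passwd/group lookups are performed: Uname and Gname stay empty,
	// while numeric IDs and device numbers come from the sysStat hook.
	return archive.FileInfoHeaderNoLookups(fi, "")
}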
func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { - hdr, err := tar.FileInfoHeader(fi, link) + hdr, err := FileInfoHeaderNoLookups(fi, link) if err != nil { return nil, err } @@ -368,9 +483,6 @@ func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, erro hdr.ChangeTime = time.Time{} hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) hdr.Name = canonicalTarName(name, fi.IsDir()) - if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { - return nil, err - } return hdr, nil } @@ -435,7 +547,7 @@ type tarAppender struct { // for hardlink mapping SeenFiles map[uint64]string - IdentityMapping *idtools.IdentityMapping + IdentityMapping idtools.IdentityMapping ChownOpts *idtools.Identity // For packing and unpacking whiteout files in the @@ -445,7 +557,7 @@ type tarAppender struct { WhiteoutConverter tarWhiteoutConverter } -func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender { +func newTarAppender(idMapping idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender { return &tarAppender{ SeenFiles: make(map[uint64]string), TarWriter: tar.NewWriter(writer), @@ -560,10 +672,9 @@ func (ta *tarAppender) addTarFile(path, name string) error { } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { - // We use system.OpenSequential to ensure we use sequential file - // access on Windows to avoid depleting the standby list. - // On Linux, this equates to a regular os.Open. - file, err := system.OpenSequential(path) + // We use sequential file access to avoid depleting the standby list on + // Windows. On Linux, this equates to a regular os.Open. + file, err := sequential.Open(path) if err != nil { return err } @@ -601,10 +712,9 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L } case tar.TypeReg, tar.TypeRegA: - // Source is regular file. We use system.OpenFileSequential to use sequential - // file access to avoid depleting the standby list on Windows. - // On Linux, this equates to a regular os.OpenFile - file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) + // Source is regular file. We use sequential file access to avoid depleting + // the standby list on Windows. On Linux, this equates to a regular os.OpenFile. + file, err := sequential.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) if err != nil { return err } @@ -630,6 +740,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L } case tar.TypeLink: + // #nosec G305 -- The target path is checked for path traversal. targetPath := filepath.Join(extractDir, hdr.Linkname) // check for hardlink breakout if !strings.HasPrefix(targetPath, extractDir) { @@ -642,7 +753,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L case tar.TypeSymlink: // path -> hdr.Linkname = targetPath // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file - targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) + targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) // #nosec G305 -- The target path is checked for path traversal. 
// the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because // that symlink would first have to be created, which would be caught earlier, at this very check: @@ -667,7 +778,11 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid} } if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { - return err + msg := "failed to Lchown %q for UID %d, GID %d" + if errors.Is(err, syscall.EINVAL) && userns.RunningInUserNS() { + msg += " (try increasing the number of subordinate IDs in /etc/subuid and /etc/subgid)" + } + return errors.Wrapf(err, msg, path, hdr.Uid, hdr.Gid) } } @@ -686,7 +801,6 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L } return err } - } if len(errors) > 0 { @@ -736,12 +850,11 @@ func Tar(path string, compression Compression) (io.ReadCloser, error) { // TarWithOptions creates an archive from the directory at `path`, only including files whose relative // paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { - // Fix the source path to work with long path names. This is a no-op // on platforms other than Windows. srcPath = fixVolumePathPrefix(srcPath) - pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) + pm, err := patternmatcher.New(options.ExcludePatterns) if err != nil { return nil, err } @@ -760,7 +873,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) go func() { ta := newTarAppender( - idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), + options.IDMap, compressWriter, options.ChownOpts, ) @@ -815,6 +928,11 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) for _, include := range options.IncludeFiles { rebaseName := options.RebaseNames[include] + var ( + parentMatchInfo []patternmatcher.MatchInfo + parentDirs []string + ) + walkRoot := getWalkRoot(srcPath, include) filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { if err != nil { @@ -841,11 +959,30 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) // is asking for that file no matter what - which is true // for some files, like .dockerignore and Dockerfile (sometimes) if include != relFilePath { - skip, err = pm.Matches(relFilePath) + for len(parentDirs) != 0 { + lastParentDir := parentDirs[len(parentDirs)-1] + if strings.HasPrefix(relFilePath, lastParentDir+string(os.PathSeparator)) { + break + } + parentDirs = parentDirs[:len(parentDirs)-1] + parentMatchInfo = parentMatchInfo[:len(parentMatchInfo)-1] + } + + var matchInfo patternmatcher.MatchInfo + if len(parentMatchInfo) != 0 { + skip, matchInfo, err = pm.MatchesUsingParentResults(relFilePath, parentMatchInfo[len(parentMatchInfo)-1]) + } else { + skip, matchInfo, err = pm.MatchesUsingParentResults(relFilePath, patternmatcher.MatchInfo{}) + } if err != nil { logrus.Errorf("Error matching %s: %v", relFilePath, err) return err } + + if f.IsDir() { + parentDirs = append(parentDirs, relFilePath) + parentMatchInfo = append(parentMatchInfo, matchInfo) + } } if skip { @@ -920,8 +1057,6 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err defer pools.BufioReader32KPool.Put(trBuf) var dirs []*tar.Header - idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) - rootIDs := 
idMapping.RootPair() whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return err @@ -956,21 +1091,13 @@ loop: } } - // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in - // the filepath format for the OS on which the daemon is running. Hence - // the check for a slash-suffix MUST be done in an OS-agnostic way. - if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { - // Not the root directory, ensure that the parent directory exists - parent := filepath.Dir(hdr.Name) - parentPath := filepath.Join(dest, parent) - if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = idtools.MkdirAllAndChownNew(parentPath, 0755, rootIDs) - if err != nil { - return err - } - } + // Ensure that the parent directory exists. + err = createImpliedDirectories(dest, hdr, options) + if err != nil { + return err } + // #nosec G305 -- The joined path is checked for path traversal. path := filepath.Join(dest, hdr.Name) rel, err := filepath.Rel(dest, path) if err != nil { @@ -1009,7 +1136,7 @@ loop: } trBuf.Reset(tr) - if err := remapIDs(idMapping, hdr); err != nil { + if err := remapIDs(options.IDMap, hdr); err != nil { return err } @@ -1035,6 +1162,7 @@ loop: } for _, hdr := range dirs { + // #nosec G305 -- The header was checked for path traversal before it was appended to the dirs slice. path := filepath.Join(dest, hdr.Name) if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { @@ -1044,10 +1172,40 @@ loop: return nil } +// createImpliedDirectories will create all parent directories of the current path with default permissions, if they do +// not already exist. This is possible as the tar format supports 'implicit' directories, where their existence is +// defined by the paths of files in the tar, but there are no header entries for the directories themselves, and thus +// we most both create them and choose metadata like permissions. +// +// The caller should have performed filepath.Clean(hdr.Name), so hdr.Name will now be in the filepath format for the OS +// on which the daemon is running. This precondition is required because this function assumes a OS-specific path +// separator when checking that a path is not the root. +func createImpliedDirectories(dest string, hdr *tar.Header, options *TarOptions) error { + // Not the root directory, ensure that the parent directory exists + if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + // RootPair() is confined inside this loop as most cases will not require a call, so we can spend some + // unneeded function calls in the uncommon case to encapsulate logic -- implied directories are a niche + // usage that reduces the portability of an image. + rootIDs := options.IDMap.RootPair() + + err = idtools.MkdirAllAndChownNew(parentPath, ImpliedDirectoryMode, rootIDs) + if err != nil { + return err + } + } + } + + return nil +} + // Untar reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive may be compressed with one of the following algorithms: -// identity (uncompressed), gzip, bzip2, xz. +// identity (uncompressed), gzip, bzip2, xz. +// // FIXME: specify behavior when target path exists vs. doesn't exist. 
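For orientation, not part of the vendored diff: since TarOptions now carries a single IDMap instead of UIDMaps/GIDMaps, unpacking without remapping looks roughly like this (assumes os, pkg/archive and pkg/idtools imports).

func unpack(tarball, dest string) error {
	f, err := os.Open(tarball)
	if err != nil {
		return err
	}
	defer f.Close()
	return archive.Untar(f, dest, &archive.TarOptions{
		// Zero-value IdentityMapping means no UID/GID remapping.
		IDMap: idtools.IdentityMapping{},
	})
}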
func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, true) @@ -1089,15 +1247,13 @@ func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decomp // TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. // If either Tar or Untar fails, TarUntar aborts and returns the error. func (archiver *Archiver) TarUntar(src, dst string) error { - logrus.Debugf("TarUntar(%s %s)", src, dst) archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) if err != nil { return err } defer archive.Close() options := &TarOptions{ - UIDMaps: archiver.IDMapping.UIDs(), - GIDMaps: archiver.IDMapping.GIDs(), + IDMap: archiver.IDMapping, } return archiver.Untar(archive, dst, options) } @@ -1110,8 +1266,7 @@ func (archiver *Archiver) UntarPath(src, dst string) error { } defer archive.Close() options := &TarOptions{ - UIDMaps: archiver.IDMapping.UIDs(), - GIDMaps: archiver.IDMapping.GIDs(), + IDMap: archiver.IDMapping, } return archiver.Untar(archive, dst, options) } @@ -1134,11 +1289,9 @@ func (archiver *Archiver) CopyWithTar(src, dst string) error { // as owner rootIDs := archiver.IDMapping.RootPair() // Create dst, copy src's content into it - logrus.Debugf("Creating dest directory: %s", dst) if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { return err } - logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) return archiver.TarUntar(src, dst) } @@ -1146,7 +1299,6 @@ func (archiver *Archiver) CopyWithTar(src, dst string) error { // for a single file. It copies a regular file from path `src` to // path `dst`, and preserves all its metadata. func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { - logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) srcSt, err := os.Stat(src) if err != nil { return err @@ -1181,7 +1333,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { } defer srcF.Close() - hdr, err := tar.FileInfoHeader(srcSt, "") + hdr, err := FileInfoHeaderNoLookups(srcSt, "") if err != nil { return err } @@ -1221,11 +1373,11 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { } // IdentityMapping returns the IdentityMapping of the archiver. -func (archiver *Archiver) IdentityMapping() *idtools.IdentityMapping { +func (archiver *Archiver) IdentityMapping() idtools.IdentityMapping { return archiver.IDMapping } -func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error { +func remapIDs(idMapping idtools.IdentityMapping, hdr *tar.Header) error { ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}) hdr.Uid, hdr.Gid = ids.UID, ids.GID return err @@ -1272,7 +1424,7 @@ func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { // of that file as an archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. 
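A small sketch, not part of the vendored diff: the Archiver now holds its IdentityMapping by value, so the default archiver needs no explicit mapping at all (paths are placeholders).

func seedDataDir() error {
	// NewDefaultArchiver uses the identity mapping; CopyWithTar creates the
	// destination directory and copies the source tree into it.
	a := archive.NewDefaultArchiver()
	return a.CopyWithTar("/var/lib/app/seed", "/var/lib/app/data")
}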
func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { - f, err := ioutil.TempFile(dir, "") + f, err := os.CreateTemp(dir, "") if err != nil { return nil, err } diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go index 0a3cc1f92b..76321a35e3 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go @@ -59,7 +59,7 @@ func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os Gname: hdr.Gname, AccessTime: hdr.AccessTime, ChangeTime: hdr.ChangeTime, - } + } //#nosec G305 -- An archive is being created, not extracted. } } diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go index fea53d3ae2..1a2aea2e65 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go @@ -11,12 +11,16 @@ import ( "strings" "syscall" - "github.com/containerd/containerd/sys" + "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/system" "golang.org/x/sys/unix" ) +func init() { + sysStat = statUnix +} + // fixVolumePathPrefix does platform specific processing to ensure that if // the path being passed in is not in a volume path format, convert it to one. func fixVolumePathPrefix(srcPath string) string { @@ -45,19 +49,24 @@ func chmodTarEntry(perm os.FileMode) os.FileMode { return perm // noop for unix as golang APIs provide perm bits correctly } -func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { - s, ok := stat.(*syscall.Stat_t) +// statUnix populates hdr from system-dependent fields of fi without performing +// any OS lookups. 
+func statUnix(fi os.FileInfo, hdr *tar.Header) error { + s, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return nil + } - if ok { - // Currently go does not fill in the major/minors - if s.Mode&unix.S_IFBLK != 0 || - s.Mode&unix.S_IFCHR != 0 { - hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) //nolint: unconvert - hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) //nolint: unconvert - } + hdr.Uid = int(s.Uid) + hdr.Gid = int(s.Gid) + + if s.Mode&unix.S_IFBLK != 0 || + s.Mode&unix.S_IFCHR != 0 { + hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) //nolint: unconvert + hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) //nolint: unconvert } - return + return nil } func getInodeFromStat(stat interface{}) (inode uint64, err error) { @@ -93,7 +102,7 @@ func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { } err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))) - if errors.Is(err, syscall.EPERM) && sys.RunningInUserNS() { + if errors.Is(err, syscall.EPERM) && userns.RunningInUserNS() { // In most cases, cannot create a device if running in user namespace err = nil } diff --git a/vendor/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/docker/docker/pkg/archive/changes.go index aedb91b035..7f7242be50 100644 --- a/vendor/github.com/docker/docker/pkg/archive/changes.go +++ b/vendor/github.com/docker/docker/pkg/archive/changes.go @@ -5,7 +5,6 @@ import ( "bytes" "fmt" "io" - "io/ioutil" "os" "path/filepath" "sort" @@ -247,7 +246,6 @@ func (info *FileInfo) path() string { } func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { - sizeAtEntry := len(*changes) if oldInfo == nil { @@ -320,7 +318,6 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) (*changes)[sizeAtEntry] = change } - } // Changes add changes to file information. @@ -348,7 +345,7 @@ func ChangesDirs(newDir, oldDir string) ([]Change, error) { oldRoot, newRoot *FileInfo ) if oldDir == "" { - emptyDir, err := ioutil.TempDir("", "empty") + emptyDir, err := os.MkdirTemp("", "empty") if err != nil { return nil, err } @@ -395,10 +392,10 @@ func ChangesSize(newDir string, changes []Change) int64 { } // ExportChanges produces an Archive from the provided changes, relative to dir. -func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) { +func ExportChanges(dir string, changes []Change, idMap idtools.IdentityMapping) (io.ReadCloser, error) { reader, writer := io.Pipe() go func() { - ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil) + ta := newTarAppender(idMap, writer, nil) // this buffer is needed for the duration of this piped stream defer pools.BufioWriter32KPool.Put(ta.Buffer) diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go index 4b9f504d7d..f3111b79b1 100644 --- a/vendor/github.com/docker/docker/pkg/archive/copy.go +++ b/vendor/github.com/docker/docker/pkg/archive/copy.go @@ -4,7 +4,6 @@ import ( "archive/tar" "errors" "io" - "io/ioutil" "os" "path/filepath" "strings" @@ -30,8 +29,8 @@ var ( // clean path already ends in the separator, then another is not added. 
func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string, sep byte) string { // Ensure paths are in platform semantics - cleanedPath = strings.Replace(cleanedPath, "/", string(sep), -1) - originalPath = strings.Replace(originalPath, "/", string(sep), -1) + cleanedPath = strings.ReplaceAll(cleanedPath, "/", string(sep)) + originalPath = strings.ReplaceAll(originalPath, "/", string(sep)) if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { if !hasTrailingPathSeparator(cleanedPath, sep) { @@ -261,7 +260,7 @@ func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir // The destination exists as a directory. No alteration // to srcContent is needed as its contents can be // simply extracted to the destination directory. - return dstInfo.Path, ioutil.NopCloser(srcContent), nil + return dstInfo.Path, io.NopCloser(srcContent), nil case dstInfo.Exists && srcInfo.IsDir: // The destination exists as some type of file and the source // content is a directory. This is an error condition since @@ -304,7 +303,6 @@ func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir } return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil } - } // RebaseArchiveEntries rewrites the given srcContent archive replacing @@ -374,9 +372,6 @@ func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.Read return rebased } -// TODO @gupta-ak. These might have to be changed in the future to be -// continuity driver aware as well to support LCOW. - // CopyResource performs an archive copy from the given source path to the // given destination path. The source path MUST exist and the destination // path's parent directory must exist. diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go index 27897e6ab7..8eeccb608b 100644 --- a/vendor/github.com/docker/docker/pkg/archive/diff.go +++ b/vendor/github.com/docker/docker/pkg/archive/diff.go @@ -4,13 +4,11 @@ import ( "archive/tar" "fmt" "io" - "io/ioutil" "os" "path/filepath" "runtime" "strings" - "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" "github.com/sirupsen/logrus" @@ -33,7 +31,6 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } - idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) aufsTempdir := "" aufsHardlinks := make(map[string]*tar.Header) @@ -75,20 +72,10 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, } } - // Note as these operations are platform specific, so must the slash be. - if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { - // Not the root directory, ensure that the parent directory exists. - // This happened in some tests where an image had a tarfile without any - // parent directories. - parent := filepath.Dir(hdr.Name) - parentPath := filepath.Join(dest, parent) - - if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = system.MkdirAll(parentPath, 0600) - if err != nil { - return 0, err - } - } + // Ensure that the parent directory exists. 
+ err = createImpliedDirectories(dest, hdr, options) + if err != nil { + return 0, err } // Skip AUFS metadata dirs @@ -100,7 +87,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, basename := filepath.Base(hdr.Name) aufsHardlinks[basename] = hdr if aufsTempdir == "" { - if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil { + if aufsTempdir, err = os.MkdirTemp("", "dockerplnk"); err != nil { return 0, err } defer os.RemoveAll(aufsTempdir) @@ -114,6 +101,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, continue } } + //#nosec G305 -- The joined path is guarded against path traversal. path := filepath.Join(dest, hdr.Name) rel, err := filepath.Rel(dest, path) if err != nil { @@ -192,7 +180,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, srcData = tmpFile } - if err := remapIDs(idMapping, srcHdr); err != nil { + if err := remapIDs(options.IDMap, srcHdr); err != nil { return 0, err } @@ -210,6 +198,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, } for _, hdr := range dirs { + //#nosec G305 -- The header was checked for path traversal before it was appended to the dirs slice. path := filepath.Join(dest, hdr.Name) if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { return 0, err @@ -240,13 +229,8 @@ func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decomp dest = filepath.Clean(dest) // We need to be able to set any perms - if runtime.GOOS != "windows" { - oldmask, err := system.Umask(0) - if err != nil { - return 0, err - } - defer system.Umask(oldmask) - } + restore := overrideUmask(0) + defer restore() if decompress { decompLayer, err := DecompressStream(layer) diff --git a/vendor/github.com/docker/docker/pkg/archive/diff_unix.go b/vendor/github.com/docker/docker/pkg/archive/diff_unix.go new file mode 100644 index 0000000000..d7f806445e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/diff_unix.go @@ -0,0 +1,22 @@ +//go:build !windows +// +build !windows + +package archive + +import "golang.org/x/sys/unix" + +// overrideUmask sets current process's file mode creation mask to newmask +// and returns a function to restore it. +// +// WARNING for readers stumbling upon this code. Changing umask in a multi- +// threaded environment isn't safe. Don't use this without understanding the +// risks, and don't export this function for others to use (we shouldn't even +// be using this ourself). +// +// FIXME(thaJeztah): we should get rid of these hacks if possible. +func overrideUmask(newMask int) func() { + oldMask := unix.Umask(newMask) + return func() { + unix.Umask(oldMask) + } +} diff --git a/vendor/github.com/docker/docker/pkg/archive/diff_windows.go b/vendor/github.com/docker/docker/pkg/archive/diff_windows.go new file mode 100644 index 0000000000..d28f5b2dfd --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/diff_windows.go @@ -0,0 +1,6 @@ +package archive + +// overrideUmask is a no-op on windows. 
+func overrideUmask(newmask int) func() { + return func() {} +} diff --git a/vendor/github.com/docker/docker/pkg/archive/wrap.go b/vendor/github.com/docker/docker/pkg/archive/wrap.go index 85435694cf..032db82cea 100644 --- a/vendor/github.com/docker/docker/pkg/archive/wrap.go +++ b/vendor/github.com/docker/docker/pkg/archive/wrap.go @@ -17,8 +17,8 @@ import ( // Generate("foo.txt", "hello world", "emptyfile") // // The above call will return an archive with 2 files: -// * ./foo.txt with content "hello world" -// * ./empty with empty content +// - ./foo.txt with content "hello world" +// - ./empty with empty content // // FIXME: stream content instead of buffering // FIXME: specify permissions and other archive metadata diff --git a/vendor/github.com/docker/docker/pkg/fileutils/deprecated.go b/vendor/github.com/docker/docker/pkg/fileutils/deprecated.go new file mode 100644 index 0000000000..a4fc2d05e2 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/fileutils/deprecated.go @@ -0,0 +1,44 @@ +package fileutils + +import "github.com/moby/patternmatcher" + +type ( + // PatternMatcher allows checking paths against a list of patterns. + // + // Deprecated: use github.com/moby/patternmatcher.PatternMatcher + PatternMatcher = patternmatcher.PatternMatcher + + // MatchInfo tracks information about parent dir matches while traversing a + // filesystem. + // + // Deprecated: use github.com/moby/patternmatcher.MatchInfo + MatchInfo = patternmatcher.MatchInfo + + // Pattern defines a single regexp used to filter file paths. + // + // Deprecated: use github.com/moby/patternmatcher.Pattern + Pattern = patternmatcher.Pattern +) + +var ( + // NewPatternMatcher creates a new matcher object for specific patterns that can + // be used later to match against patterns against paths + // + // Deprecated: use github.com/moby/patternmatcher.New + NewPatternMatcher = patternmatcher.New + + // Matches returns true if file matches any of the patterns + // and isn't excluded by any of the subsequent patterns. + // + // This implementation is buggy (it only checks a single parent dir against the + // pattern) and will be removed soon. Use MatchesOrParentMatches instead. + // + // Deprecated: use github.com/moby/patternmatcher.Matches + Matches = patternmatcher.Matches + + // MatchesOrParentMatches returns true if file matches any of the patterns + // and isn't excluded by any of the subsequent patterns. 
+ // + // Deprecated: use github.com/moby/patternmatcher.MatchesOrParentMatches + MatchesOrParentMatches = patternmatcher.MatchesOrParentMatches +) diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go index 34f1c726fb..7c607efc64 100644 --- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go +++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go @@ -1,236 +1,12 @@ package fileutils // import "github.com/docker/docker/pkg/fileutils" import ( - "errors" "fmt" "io" "os" "path/filepath" - "regexp" - "strings" - "text/scanner" - - "github.com/sirupsen/logrus" ) -// PatternMatcher allows checking paths against a list of patterns -type PatternMatcher struct { - patterns []*Pattern - exclusions bool -} - -// NewPatternMatcher creates a new matcher object for specific patterns that can -// be used later to match against patterns against paths -func NewPatternMatcher(patterns []string) (*PatternMatcher, error) { - pm := &PatternMatcher{ - patterns: make([]*Pattern, 0, len(patterns)), - } - for _, p := range patterns { - // Eliminate leading and trailing whitespace. - p = strings.TrimSpace(p) - if p == "" { - continue - } - p = filepath.Clean(p) - newp := &Pattern{} - if p[0] == '!' { - if len(p) == 1 { - return nil, errors.New("illegal exclusion pattern: \"!\"") - } - newp.exclusion = true - p = p[1:] - pm.exclusions = true - } - // Do some syntax checking on the pattern. - // filepath's Match() has some really weird rules that are inconsistent - // so instead of trying to dup their logic, just call Match() for its - // error state and if there is an error in the pattern return it. - // If this becomes an issue we can remove this since its really only - // needed in the error (syntax) case - which isn't really critical. - if _, err := filepath.Match(p, "."); err != nil { - return nil, err - } - newp.cleanedPattern = p - newp.dirs = strings.Split(p, string(os.PathSeparator)) - pm.patterns = append(pm.patterns, newp) - } - return pm, nil -} - -// Matches matches path against all the patterns. Matches is not safe to be -// called concurrently -func (pm *PatternMatcher) Matches(file string) (bool, error) { - matched := false - file = filepath.FromSlash(file) - parentPath := filepath.Dir(file) - parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) - - for _, pattern := range pm.patterns { - negative := false - - if pattern.exclusion { - negative = true - } - - match, err := pattern.match(file) - if err != nil { - return false, err - } - - if !match && parentPath != "." { - // Check to see if the pattern matches one of our parent dirs. - if len(pattern.dirs) <= len(parentPathDirs) { - match, _ = pattern.match(strings.Join(parentPathDirs[:len(pattern.dirs)], string(os.PathSeparator))) - } - } - - if match { - matched = !negative - } - } - - if matched { - logrus.Debugf("Skipping excluded path: %s", file) - } - - return matched, nil -} - -// Exclusions returns true if any of the patterns define exclusions -func (pm *PatternMatcher) Exclusions() bool { - return pm.exclusions -} - -// Patterns returns array of active patterns -func (pm *PatternMatcher) Patterns() []*Pattern { - return pm.patterns -} - -// Pattern defines a single regexp used to filter file paths. 
-type Pattern struct { - cleanedPattern string - dirs []string - regexp *regexp.Regexp - exclusion bool -} - -func (p *Pattern) String() string { - return p.cleanedPattern -} - -// Exclusion returns true if this pattern defines exclusion -func (p *Pattern) Exclusion() bool { - return p.exclusion -} - -func (p *Pattern) match(path string) (bool, error) { - - if p.regexp == nil { - if err := p.compile(); err != nil { - return false, filepath.ErrBadPattern - } - } - - b := p.regexp.MatchString(path) - - return b, nil -} - -func (p *Pattern) compile() error { - regStr := "^" - pattern := p.cleanedPattern - // Go through the pattern and convert it to a regexp. - // We use a scanner so we can support utf-8 chars. - var scan scanner.Scanner - scan.Init(strings.NewReader(pattern)) - - sl := string(os.PathSeparator) - escSL := sl - if sl == `\` { - escSL += `\` - } - - for scan.Peek() != scanner.EOF { - ch := scan.Next() - - if ch == '*' { - if scan.Peek() == '*' { - // is some flavor of "**" - scan.Next() - - // Treat **/ as ** so eat the "/" - if string(scan.Peek()) == sl { - scan.Next() - } - - if scan.Peek() == scanner.EOF { - // is "**EOF" - to align with .gitignore just accept all - regStr += ".*" - } else { - // is "**" - // Note that this allows for any # of /'s (even 0) because - // the .* will eat everything, even /'s - regStr += "(.*" + escSL + ")?" - } - } else { - // is "*" so map it to anything but "/" - regStr += "[^" + escSL + "]*" - } - } else if ch == '?' { - // "?" is any char except "/" - regStr += "[^" + escSL + "]" - } else if ch == '.' || ch == '$' { - // Escape some regexp special chars that have no meaning - // in golang's filepath.Match - regStr += `\` + string(ch) - } else if ch == '\\' { - // escape next char. Note that a trailing \ in the pattern - // will be left alone (but need to escape it) - if sl == `\` { - // On windows map "\" to "\\", meaning an escaped backslash, - // and then just continue because filepath.Match on - // Windows doesn't allow escaping at all - regStr += escSL - continue - } - if scan.Peek() != scanner.EOF { - regStr += `\` + string(scan.Next()) - } else { - regStr += `\` - } - } else { - regStr += string(ch) - } - } - - regStr += "$" - - re, err := regexp.Compile(regStr) - if err != nil { - return err - } - - p.regexp = re - return nil -} - -// Matches returns true if file matches any of the patterns -// and isn't excluded by any of the subsequent patterns. -func Matches(file string, patterns []string) (bool, error) { - pm, err := NewPatternMatcher(patterns) - if err != nil { - return false, err - } - file = filepath.Clean(file) - - if file == "." { - // Don't let them exclude everything, kind of silly. - return false, nil - } - - return pm.Matches(file) -} - // CopyFile copies from src to dst until either EOF is reached // on src or an error occurs. It verifies src exists and removes // the dst if it exists. diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go index af0c26b614..f782b4266a 100644 --- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go +++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go @@ -5,7 +5,6 @@ package fileutils // import "github.com/docker/docker/pkg/fileutils" import ( "fmt" - "io/ioutil" "os" "github.com/sirupsen/logrus" @@ -14,7 +13,7 @@ import ( // GetTotalUsedFds Returns the number of used File Descriptors by // reading it via /proc filesystem. 
func GetTotalUsedFds() int { - if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { + if fds, err := os.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) } else { return len(fds) diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go index 5e6310fdcd..7df039b4cf 100644 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go @@ -91,3 +91,12 @@ func GetConfigHome() (string, error) { } return filepath.Join(home, ".config"), nil } + +// GetLibHome returns $HOME/.local/lib +func GetLibHome() (string, error) { + home := os.Getenv("HOME") + if home == "" { + return "", errors.New("could not get HOME") + } + return filepath.Join(home, ".local/lib"), nil +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go index fc48e674c1..11f1bec985 100644 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go @@ -26,3 +26,8 @@ func GetDataHome() (string, error) { func GetConfigHome() (string, error) { return "", errors.New("homedir.GetConfigHome() is not supported on this system") } + +// GetLibHome is unsupported on non-linux system. +func GetLibHome() (string, error) { + return "", errors.New("homedir.GetLibHome() is not supported on this system") +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/docker/docker/pkg/idtools/idtools.go index 25a57b231e..1e0a89004a 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools.go +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools.go @@ -108,70 +108,72 @@ type Identity struct { SID string } -// IdentityMapping contains a mappings of UIDs and GIDs -type IdentityMapping struct { - uids []IDMap - gids []IDMap +// Chown changes the numeric uid and gid of the named file to id.UID and id.GID. +func (id Identity) Chown(name string) error { + return os.Chown(name, id.UID, id.GID) } -// NewIDMappingsFromMaps creates a new mapping from two slices -// Deprecated: this is a temporary shim while transitioning to IDMapping -func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IdentityMapping { - return &IdentityMapping{uids: uids, gids: gids} +// IdentityMapping contains a mappings of UIDs and GIDs. +// The zero value represents an empty mapping. +type IdentityMapping struct { + UIDMaps []IDMap `json:"UIDMaps"` + GIDMaps []IDMap `json:"GIDMaps"` } // RootPair returns a uid and gid pair for the root user. The error is ignored // because a root user always exists, and the defaults are correct when the uid // and gid maps are empty. -func (i *IdentityMapping) RootPair() Identity { - uid, gid, _ := GetRootUIDGID(i.uids, i.gids) +func (i IdentityMapping) RootPair() Identity { + uid, gid, _ := GetRootUIDGID(i.UIDMaps, i.GIDMaps) return Identity{UID: uid, GID: gid} } // ToHost returns the host UID and GID for the container uid, gid. 
// Remapping is only performed if the ids aren't already the remapped root ids -func (i *IdentityMapping) ToHost(pair Identity) (Identity, error) { +func (i IdentityMapping) ToHost(pair Identity) (Identity, error) { var err error target := i.RootPair() if pair.UID != target.UID { - target.UID, err = toHost(pair.UID, i.uids) + target.UID, err = toHost(pair.UID, i.UIDMaps) if err != nil { return target, err } } if pair.GID != target.GID { - target.GID, err = toHost(pair.GID, i.gids) + target.GID, err = toHost(pair.GID, i.GIDMaps) } return target, err } // ToContainer returns the container UID and GID for the host uid and gid -func (i *IdentityMapping) ToContainer(pair Identity) (int, int, error) { - uid, err := toContainer(pair.UID, i.uids) +func (i IdentityMapping) ToContainer(pair Identity) (int, int, error) { + uid, err := toContainer(pair.UID, i.UIDMaps) if err != nil { return -1, -1, err } - gid, err := toContainer(pair.GID, i.gids) + gid, err := toContainer(pair.GID, i.GIDMaps) return uid, gid, err } // Empty returns true if there are no id mappings -func (i *IdentityMapping) Empty() bool { - return len(i.uids) == 0 && len(i.gids) == 0 +func (i IdentityMapping) Empty() bool { + return len(i.UIDMaps) == 0 && len(i.GIDMaps) == 0 } -// UIDs return the UID mapping -// TODO: remove this once everything has been refactored to use pairs -func (i *IdentityMapping) UIDs() []IDMap { - return i.uids +// UIDs returns the mapping for UID. +// +// Deprecated: reference the UIDMaps field directly. +func (i IdentityMapping) UIDs() []IDMap { + return i.UIDMaps } -// GIDs return the UID mapping -// TODO: remove this once everything has been refactored to use pairs -func (i *IdentityMapping) GIDs() []IDMap { - return i.gids +// GIDs returns the mapping for GID. +// +// Deprecated: reference the GIDMaps field directly. +func (i IdentityMapping) GIDs() []IDMap { + return i.GIDMaps } func createIDMap(subidRanges ranges) []IDMap { diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go index ceec0339b5..03846b0307 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go @@ -8,6 +8,7 @@ import ( "fmt" "io" "os" + "os/exec" "path/filepath" "strconv" "sync" @@ -30,6 +31,10 @@ func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting // chown the full directory path if it exists var paths []string + path, err := filepath.Abs(path) + if err != nil { + return err + } stat, err := system.Stat(path) if err == nil { @@ -195,7 +200,7 @@ func callGetent(database, key string) (io.Reader, error) { } out, err := execCmd(getentCmd, database, key) if err != nil { - exitCode, errC := system.GetExitCode(err) + exitCode, errC := getExitCode(err) if errC != nil { return nil, err } @@ -209,11 +214,22 @@ func callGetent(database, key string) (io.Reader, error) { default: return nil, err } - } return bytes.NewReader(out), nil } +// getExitCode returns the ExitStatus of the specified error if its type is +// exec.ExitError, returns 0 and an error otherwise. 
+func getExitCode(err error) (int, error) { + exitCode := 0 + if exiterr, ok := err.(*exec.ExitError); ok { + if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok { + return procExit.ExitStatus(), nil + } + } + return exitCode, fmt.Errorf("failed to get exit code") +} + // setPermissions performs a chown/chmod only if the uid/gid don't match what's requested // Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the // dir is on an NFS share, so don't call chown unless we absolutely must. @@ -240,24 +256,37 @@ func setPermissions(p string, mode os.FileMode, uid, gid int, stat *system.StatT // NewIdentityMapping takes a requested username and // using the data from /etc/sub{uid,gid} ranges, creates the // proper uid and gid remapping ranges for that user/group pair +// +// Deprecated: Use LoadIdentityMapping. func NewIdentityMapping(name string) (*IdentityMapping, error) { + m, err := LoadIdentityMapping(name) + if err != nil { + return nil, err + } + return &m, err +} + +// LoadIdentityMapping takes a requested username and +// using the data from /etc/sub{uid,gid} ranges, creates the +// proper uid and gid remapping ranges for that user/group pair +func LoadIdentityMapping(name string) (IdentityMapping, error) { usr, err := LookupUser(name) if err != nil { - return nil, fmt.Errorf("Could not get user for username %s: %v", name, err) + return IdentityMapping{}, fmt.Errorf("Could not get user for username %s: %v", name, err) } subuidRanges, err := lookupSubUIDRanges(usr) if err != nil { - return nil, err + return IdentityMapping{}, err } subgidRanges, err := lookupSubGIDRanges(usr) if err != nil { - return nil, err + return IdentityMapping{}, err } - return &IdentityMapping{ - uids: subuidRanges, - gids: subgidRanges, + return IdentityMapping{ + UIDMaps: subuidRanges, + GIDMaps: subgidRanges, }, nil } diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go index 35ede0fffa..0f5aadd496 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go @@ -6,6 +6,15 @@ import ( "github.com/docker/docker/pkg/system" ) +const ( + SeTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege" +) + +const ( + ContainerAdministratorSidString = "S-1-5-93-2-1" + ContainerUserSidString = "S-1-5-93-2-2" +) + // This is currently a wrapper around MkdirAll, however, since currently // permissions aren't set through this path, the identity isn't utilized. 
// Ownership is handled elsewhere, but in the future could be support here diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go index bf7ae0564b..3ad9255df2 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go +++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go @@ -88,7 +88,6 @@ func addUser(name string) error { } func createSubordinateRanges(name string) error { - // first, we should verify that ranges weren't automatically created // by the distro tooling ranges, err := parseSubuid(name) diff --git a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go index 87514b643d..c1cfa62fd2 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go @@ -29,11 +29,12 @@ var ( // and releases new byte slices to adjust to current needs, so the buffer // won't be overgrown after peak loads. type BytesPipe struct { - mu sync.Mutex - wait *sync.Cond - buf []*fixedBuffer - bufLen int - closeErr error // error to return from next Read. set to nil if not closed. + mu sync.Mutex + wait *sync.Cond + buf []*fixedBuffer + bufLen int + closeErr error // error to return from next Read. set to nil if not closed. + readBlock bool // check read BytesPipe is Wait() or not } // NewBytesPipe creates new BytesPipe, initialized by specified slice. @@ -50,12 +51,12 @@ func NewBytesPipe() *BytesPipe { // It can allocate new []byte slices in a process of writing. func (bp *BytesPipe) Write(p []byte) (int, error) { bp.mu.Lock() + defer bp.mu.Unlock() written := 0 loop0: for { if bp.closeErr != nil { - bp.mu.Unlock() return written, ErrClosed } @@ -72,7 +73,6 @@ loop0: // errBufferFull is an error we expect to get if the buffer is full if err != nil && err != errBufferFull { bp.wait.Broadcast() - bp.mu.Unlock() return written, err } @@ -86,6 +86,9 @@ loop0: // make sure the buffer doesn't grow too big from this write for bp.bufLen >= blockThreshold { + if bp.readBlock { + bp.wait.Broadcast() + } bp.wait.Wait() if bp.closeErr != nil { continue loop0 @@ -100,7 +103,6 @@ loop0: bp.buf = append(bp.buf, getBuffer(nextCap)) } bp.wait.Broadcast() - bp.mu.Unlock() return written, nil } @@ -126,17 +128,16 @@ func (bp *BytesPipe) Close() error { // Data could be read only once. 
func (bp *BytesPipe) Read(p []byte) (n int, err error) { bp.mu.Lock() + defer bp.mu.Unlock() if bp.bufLen == 0 { if bp.closeErr != nil { - err := bp.closeErr - bp.mu.Unlock() - return 0, err + return 0, bp.closeErr } + bp.readBlock = true bp.wait.Wait() + bp.readBlock = false if bp.bufLen == 0 && bp.closeErr != nil { - err := bp.closeErr - bp.mu.Unlock() - return 0, err + return 0, bp.closeErr } } @@ -161,7 +162,6 @@ func (bp *BytesPipe) Read(p []byte) (n int, err error) { } bp.wait.Broadcast() - bp.mu.Unlock() return } diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go b/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go index 534d66ac26..82671d8cd5 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go @@ -2,7 +2,6 @@ package ioutils // import "github.com/docker/docker/pkg/ioutils" import ( "io" - "io/ioutil" "os" "path/filepath" ) @@ -11,7 +10,7 @@ import ( // temporary file and closing it atomically changes the temporary file to // destination path. Writing and closing concurrently is not allowed. func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) { - f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) + f, err := os.CreateTemp(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) if err != nil { return nil, err } @@ -94,7 +93,7 @@ type AtomicWriteSet struct { // commit. If no temporary directory is given the system // default is used. func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) { - td, err := ioutil.TempDir(tmpDir, "write-set-") + td, err := os.MkdirTemp(tmpDir, "write-set-") if err != nil { return nil, err } diff --git a/vendor/github.com/docker/docker/pkg/ioutils/readers.go b/vendor/github.com/docker/docker/pkg/ioutils/readers.go index 1f657bd3dc..de00b95e3f 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/readers.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/readers.go @@ -2,9 +2,12 @@ package ioutils // import "github.com/docker/docker/pkg/ioutils" import ( "context" - "crypto/sha256" - "encoding/hex" "io" + + // make sure crypto.SHA256, crypto.sha512 and crypto.SHA384 are registered + // TODO remove once https://github.com/opencontainers/go-digest/pull/64 is merged. + _ "crypto/sha256" + _ "crypto/sha512" ) // ReadCloserWrapper wraps an io.Reader, and implements an io.ReadCloser @@ -49,15 +52,6 @@ func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { } } -// HashData returns the sha256 sum of src. -func HashData(src io.Reader) (string, error) { - h := sha256.New() - if _, err := io.Copy(h, src); err != nil { - return "", err - } - return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil -} - // OnEOFReader wraps an io.ReadCloser and a function // the function will run at the end of file or close the file. type OnEOFReader struct { diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go index 4e67ec2f53..7489122309 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go @@ -3,9 +3,9 @@ package ioutils // import "github.com/docker/docker/pkg/ioutils" -import "io/ioutil" +import "os" -// TempDir on Unix systems is equivalent to ioutil.TempDir. +// TempDir on Unix systems is equivalent to os.MkdirTemp. 
func TempDir(dir, prefix string) (string, error) { - return ioutil.TempDir(dir, prefix) + return os.MkdirTemp(dir, prefix) } diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go index ecaba2e36d..a57fd9af6a 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go @@ -1,14 +1,14 @@ package ioutils // import "github.com/docker/docker/pkg/ioutils" import ( - "io/ioutil" + "os" "github.com/docker/docker/pkg/longpath" ) -// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format. +// TempDir is the equivalent of os.MkdirTemp, except that the result is in Windows longpath format. func TempDir(dir, prefix string) (string, error) { - tempDir, err := ioutil.TempDir(dir, prefix) + tempDir, err := os.MkdirTemp(dir, prefix) if err != nil { return "", err } diff --git a/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go index 090bd1cadb..2fbbc3cb62 100644 --- a/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go +++ b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go @@ -1,8 +1,19 @@ +// Package namesgenerator generates random names. +// +// This package is officially "frozen" - no new additions will be accepted. +// +// For a long time, this package provided a lot of joy within the project, but +// at some point the conflicts of opinion became greater than the added joy. +// +// At some future time, this may be replaced with something that sparks less +// controversy, but for now it will remain as-is. +// +// See also https://github.com/moby/moby/pull/43210#issuecomment-1029934277 package namesgenerator // import "github.com/docker/docker/pkg/namesgenerator" import ( - "fmt" "math/rand" + "strconv" ) var ( @@ -60,8 +71,8 @@ var ( "hungry", "infallible", "inspiring", - "interesting", "intelligent", + "interesting", "jolly", "jovial", "keen", @@ -120,6 +131,9 @@ var ( // Docker, starting from 0.7.x, generates names from notable scientists and hackers. // Please, for any amazing man that you add to the list, consider adding an equally amazing woman to it, and vice versa. right = [...]string{ + // Maria Gaetana Agnesi - Italian mathematician, philosopher, theologian and humanitarian. She was the first woman to write a mathematics handbook and the first woman appointed as a Mathematics Professor at a University. https://en.wikipedia.org/wiki/Maria_Gaetana_Agnesi + "agnesi", + // Muhammad ibn Jābir al-Ḥarrānī al-Battānī was a founding father of astronomy. https://en.wikipedia.org/wiki/Mu%E1%B8%A5ammad_ibn_J%C4%81bir_al-%E1%B8%A4arr%C4%81n%C4%AB_al-Batt%C4%81n%C4%AB "albattani", @@ -132,9 +146,6 @@ var ( // Kathleen Antonelli, American computer programmer and one of the six original programmers of the ENIAC - https://en.wikipedia.org/wiki/Kathleen_Antonelli "antonelli", - // Maria Gaetana Agnesi - Italian mathematician, philosopher, theologian and humanitarian. She was the first woman to write a mathematics handbook and the first woman appointed as a Mathematics Professor at a University. https://en.wikipedia.org/wiki/Maria_Gaetana_Agnesi - "agnesi", - // Archimedes was a physicist, engineer and mathematician who invented too many things to list them here. 
https://en.wikipedia.org/wiki/Archimedes "archimedes", @@ -249,18 +260,18 @@ var ( // Asima Chatterjee was an Indian organic chemist noted for her research on vinca alkaloids, development of drugs for treatment of epilepsy and malaria - https://en.wikipedia.org/wiki/Asima_Chatterjee "chatterjee", - // Pafnuty Chebyshev - Russian mathematician. He is known fo his works on probability, statistics, mechanics, analytical geometry and number theory https://en.wikipedia.org/wiki/Pafnuty_Chebyshev - "chebyshev", - - // Bram Cohen - American computer programmer and author of the BitTorrent peer-to-peer protocol. https://en.wikipedia.org/wiki/Bram_Cohen - "cohen", - // David Lee Chaum - American computer scientist and cryptographer. Known for his seminal contributions in the field of anonymous communication. https://en.wikipedia.org/wiki/David_Chaum "chaum", + // Pafnuty Chebyshev - Russian mathematician. He is known fo his works on probability, statistics, mechanics, analytical geometry and number theory https://en.wikipedia.org/wiki/Pafnuty_Chebyshev + "chebyshev", + // Joan Clarke - Bletchley Park code breaker during the Second World War who pioneered techniques that remained top secret for decades. Also an accomplished numismatist https://en.wikipedia.org/wiki/Joan_Clarke "clarke", + // Bram Cohen - American computer programmer and author of the BitTorrent peer-to-peer protocol. https://en.wikipedia.org/wiki/Bram_Cohen + "cohen", + // Jane Colden - American botanist widely considered the first female American botanist - https://en.wikipedia.org/wiki/Jane_Colden "colden", @@ -596,6 +607,9 @@ var ( // Maryam Mirzakhani - an Iranian mathematician and the first woman to win the Fields Medal. https://en.wikipedia.org/wiki/Maryam_Mirzakhani "mirzakhani", + // Rita Levi-Montalcini - Won Nobel Prize in Physiology or Medicine jointly with colleague Stanley Cohen for the discovery of nerve growth factor (https://en.wikipedia.org/wiki/Rita_Levi-Montalcini) + "montalcini", + // Gordon Earle Moore - American engineer, Silicon Valley founding father, author of Moore's law. https://en.wikipedia.org/wiki/Gordon_Moore "moore", @@ -677,9 +691,6 @@ var ( // Sally Kristen Ride was an American physicist and astronaut. She was the first American woman in space, and the youngest American astronaut. https://en.wikipedia.org/wiki/Sally_Ride "ride", - // Rita Levi-Montalcini - Won Nobel Prize in Physiology or Medicine jointly with colleague Stanley Cohen for the discovery of nerve growth factor (https://en.wikipedia.org/wiki/Rita_Levi-Montalcini) - "montalcini", - // Dennis Ritchie - co-creator of UNIX and the C programming language. - https://en.wikipedia.org/wiki/Dennis_Ritchie "ritchie", @@ -782,15 +793,15 @@ var ( // Dorothy Vaughan was a NASA mathematician and computer programmer on the SCOUT launch vehicle program that put America's first satellites into space - https://en.wikipedia.org/wiki/Dorothy_Vaughan "vaughan", + // Cédric Villani - French mathematician, won Fields Medal, Fermat Prize and Poincaré Price for his work in differential geometry and statistical mechanics. https://en.wikipedia.org/wiki/C%C3%A9dric_Villani + "villani", + // Sir Mokshagundam Visvesvaraya - is a notable Indian engineer. He is a recipient of the Indian Republic's highest honour, the Bharat Ratna, in 1955. 
On his birthday, 15 September is celebrated as Engineer's Day in India in his memory - https://en.wikipedia.org/wiki/Visvesvaraya "visvesvaraya", // Christiane Nüsslein-Volhard - German biologist, won Nobel Prize in Physiology or Medicine in 1995 for research on the genetic control of embryonic development. https://en.wikipedia.org/wiki/Christiane_N%C3%BCsslein-Volhard "volhard", - // Cédric Villani - French mathematician, won Fields Medal, Fermat Prize and Poincaré Price for his work in differential geometry and statistical mechanics. https://en.wikipedia.org/wiki/C%C3%A9dric_Villani - "villani", - // Marlyn Wescoff - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Marlyn_Meltzer "wescoff", @@ -837,13 +848,13 @@ var ( // integer between 0 and 10 will be added to the end of the name, e.g `focused_turing3` func GetRandomName(retry int) string { begin: - name := fmt.Sprintf("%s_%s", left[rand.Intn(len(left))], right[rand.Intn(len(right))]) //nolint:gosec // G404: Use of weak random number generator (math/rand instead of crypto/rand) + name := left[rand.Intn(len(left))] + "_" + right[rand.Intn(len(right))] //nolint:gosec // G404: Use of weak random number generator (math/rand instead of crypto/rand) if name == "boring_wozniak" /* Steve Wozniak is not boring */ { goto begin } if retry > 0 { - name = fmt.Sprintf("%s%d", name, rand.Intn(10)) //nolint:gosec // G404: Use of weak random number generator (math/rand instead of crypto/rand) + name += strconv.Itoa(rand.Intn(10)) //nolint:gosec // G404: Use of weak random number generator (math/rand instead of crypto/rand) } return name } diff --git a/vendor/github.com/docker/docker/pkg/parsers/parsers.go b/vendor/github.com/docker/docker/pkg/parsers/parsers.go index 068e524807..e6d7b33ec0 100644 --- a/vendor/github.com/docker/docker/pkg/parsers/parsers.go +++ b/vendor/github.com/docker/docker/pkg/parsers/parsers.go @@ -25,13 +25,14 @@ func ParseKeyValueOpt(opt string) (string, string, error) { // set to `true`. Values larger than `maximum` cause an error if max is non zero, // in order to stop the map becoming excessively large. // Supported formats: -// 7 -// 1-6 -// 0,3-4,7,8-10 -// 0-0,0,1-7 -// 03,1-3 <- this is gonna get parsed as [1,2,3] -// 3,2,1 -// 0-2,3,1 +// +// 7 +// 1-6 +// 0,3-4,7,8-10 +// 0-0,0,1-7 +// 03,1-3 <- this is gonna get parsed as [1,2,3] +// 3,2,1 +// 0-2,3,1 func ParseUintListMaximum(val string, maximum int) (map[int]bool, error) { return parseUintList(val, maximum) } @@ -42,13 +43,14 @@ func ParseUintListMaximum(val string, maximum int) (map[int]bool, error) { // input string. It returns a `map[int]bool` with available elements from `val` // set to `true`. 
// Supported formats: -// 7 -// 1-6 -// 0,3-4,7,8-10 -// 0-0,0,1-7 -// 03,1-3 <- this is gonna get parsed as [1,2,3] -// 3,2,1 -// 0-2,3,1 +// +// 7 +// 1-6 +// 0,3-4,7,8-10 +// 0-0,0,1-7 +// 03,1-3 <- this is gonna get parsed as [1,2,3] +// 3,2,1 +// 0-2,3,1 func ParseUintList(val string) (map[int]bool, error) { return parseUintList(val, 0) } diff --git a/vendor/github.com/docker/docker/pkg/system/exitcode.go b/vendor/github.com/docker/docker/pkg/system/exitcode.go deleted file mode 100644 index 4ba8fe35bf..0000000000 --- a/vendor/github.com/docker/docker/pkg/system/exitcode.go +++ /dev/null @@ -1,19 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import ( - "fmt" - "os/exec" - "syscall" -) - -// GetExitCode returns the ExitStatus of the specified error if its type is -// exec.ExitError, returns 0 and an error otherwise. -func GetExitCode(err error) (int, error) { - exitCode := 0 - if exiterr, ok := err.(*exec.ExitError); ok { - if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok { - return procExit.ExitStatus(), nil - } - } - return exitCode, fmt.Errorf("failed to get exit code") -} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys.go b/vendor/github.com/docker/docker/pkg/system/filesys.go new file mode 100644 index 0000000000..ce5990c914 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/filesys.go @@ -0,0 +1,19 @@ +package system + +import ( + "os" + "path/filepath" + "strings" +) + +// IsAbs is a platform-agnostic wrapper for filepath.IsAbs. +// +// On Windows, golang filepath.IsAbs does not consider a path \windows\system32 +// as absolute as it doesn't start with a drive-letter/colon combination. However, +// in docker we need to verify things such as WORKDIR /windows/system32 in +// a Dockerfile (which gets translated to \windows\system32 when being processed +// by the daemon). This SHOULD be treated as absolute from a docker processing +// perspective. +func IsAbs(path string) bool { + return filepath.IsAbs(path) || strings.HasPrefix(path, string(os.PathSeparator)) +} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_deprecated.go b/vendor/github.com/docker/docker/pkg/system/filesys_deprecated.go new file mode 100644 index 0000000000..b2ee006314 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/filesys_deprecated.go @@ -0,0 +1,35 @@ +package system + +import ( + "os" + + "github.com/moby/sys/sequential" +) + +// CreateSequential is deprecated. +// +// Deprecated: use os.Create or github.com/moby/sys/sequential.Create() +func CreateSequential(name string) (*os.File, error) { + return sequential.Create(name) +} + +// OpenSequential is deprecated. +// +// Deprecated: use os.Open or github.com/moby/sys/sequential.Open +func OpenSequential(name string) (*os.File, error) { + return sequential.Open(name) +} + +// OpenFileSequential is deprecated. +// +// Deprecated: use github.com/moby/sys/sequential.OpenFile() +func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) { + return sequential.OpenFile(name, flag, perm) +} + +// TempFileSequential is deprecated. 
+// +// Deprecated: use os.CreateTemp or github.com/moby/sys/sequential.CreateTemp +func TempFileSequential(dir, prefix string) (f *os.File, err error) { + return sequential.CreateTemp(dir, prefix) +} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_unix.go b/vendor/github.com/docker/docker/pkg/system/filesys_unix.go index 186d9d9a11..3801129404 100644 --- a/vendor/github.com/docker/docker/pkg/system/filesys_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/filesys_unix.go @@ -3,11 +3,7 @@ package system // import "github.com/docker/docker/pkg/system" -import ( - "io/ioutil" - "os" - "path/filepath" -) +import "os" // MkdirAllWithACL is a wrapper for os.MkdirAll on unix systems. func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { @@ -19,50 +15,3 @@ func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { func MkdirAll(path string, perm os.FileMode) error { return os.MkdirAll(path, perm) } - -// IsAbs is a platform-specific wrapper for filepath.IsAbs. -func IsAbs(path string) bool { - return filepath.IsAbs(path) -} - -// The functions below here are wrappers for the equivalents in the os and ioutils packages. -// They are passthrough on Unix platforms, and only relevant on Windows. - -// CreateSequential creates the named file with mode 0666 (before umask), truncating -// it if it already exists. If successful, methods on the returned -// File can be used for I/O; the associated file descriptor has mode -// O_RDWR. -// If there is an error, it will be of type *PathError. -func CreateSequential(name string) (*os.File, error) { - return os.Create(name) -} - -// OpenSequential opens the named file for reading. If successful, methods on -// the returned file can be used for reading; the associated file -// descriptor has mode O_RDONLY. -// If there is an error, it will be of type *PathError. -func OpenSequential(name string) (*os.File, error) { - return os.Open(name) -} - -// OpenFileSequential is the generalized open call; most users will use Open -// or Create instead. It opens the named file with specified flag -// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, -// methods on the returned File can be used for I/O. -// If there is an error, it will be of type *PathError. -func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) { - return os.OpenFile(name, flag, perm) -} - -// TempFileSequential creates a new temporary file in the directory dir -// with a name beginning with prefix, opens the file for reading -// and writing, and returns the resulting *os.File. -// If dir is the empty string, TempFile uses the default directory -// for temporary files (see os.TempDir). -// Multiple programs calling TempFile simultaneously -// will not choose the same file. The caller can use f.Name() -// to find the pathname of the file. It is the caller's responsibility -// to remove the file when no longer needed. 
-func TempFileSequential(dir, prefix string) (f *os.File, err error) { - return ioutil.TempFile(dir, prefix) -} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go index b4646277ab..e3fa9f731c 100644 --- a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go @@ -2,13 +2,8 @@ package system // import "github.com/docker/docker/pkg/system" import ( "os" - "path/filepath" "regexp" - "strconv" - "strings" - "sync" "syscall" - "time" "unsafe" "golang.org/x/sys/windows" @@ -121,172 +116,3 @@ func mkdirWithACL(name string, sddl string) error { } return nil } - -// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows, -// golang filepath.IsAbs does not consider a path \windows\system32 as absolute -// as it doesn't start with a drive-letter/colon combination. However, in -// docker we need to verify things such as WORKDIR /windows/system32 in -// a Dockerfile (which gets translated to \windows\system32 when being processed -// by the daemon. This SHOULD be treated as absolute from a docker processing -// perspective. -func IsAbs(path string) bool { - if filepath.IsAbs(path) || strings.HasPrefix(path, string(os.PathSeparator)) { - return true - } - return false -} - -// The origin of the functions below here are the golang OS and windows packages, -// slightly modified to only cope with files, not directories due to the -// specific use case. -// -// The alteration is to allow a file on Windows to be opened with -// FILE_FLAG_SEQUENTIAL_SCAN (particular for docker load), to avoid eating -// the standby list, particularly when accessing large files such as layer.tar. - -// CreateSequential creates the named file with mode 0666 (before umask), truncating -// it if it already exists. If successful, methods on the returned -// File can be used for I/O; the associated file descriptor has mode -// O_RDWR. -// If there is an error, it will be of type *PathError. -func CreateSequential(name string) (*os.File, error) { - return OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0) -} - -// OpenSequential opens the named file for reading. If successful, methods on -// the returned file can be used for reading; the associated file -// descriptor has mode O_RDONLY. -// If there is an error, it will be of type *PathError. -func OpenSequential(name string) (*os.File, error) { - return OpenFileSequential(name, os.O_RDONLY, 0) -} - -// OpenFileSequential is the generalized open call; most users will use Open -// or Create instead. -// If there is an error, it will be of type *PathError. 
-func OpenFileSequential(name string, flag int, _ os.FileMode) (*os.File, error) { - if name == "" { - return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT} - } - r, errf := windowsOpenFileSequential(name, flag, 0) - if errf == nil { - return r, nil - } - return nil, &os.PathError{Op: "open", Path: name, Err: errf} -} - -func windowsOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) { - r, e := windowsOpenSequential(name, flag|windows.O_CLOEXEC, 0) - if e != nil { - return nil, e - } - return os.NewFile(uintptr(r), name), nil -} - -func makeInheritSa() *windows.SecurityAttributes { - var sa windows.SecurityAttributes - sa.Length = uint32(unsafe.Sizeof(sa)) - sa.InheritHandle = 1 - return &sa -} - -func windowsOpenSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) { - if len(path) == 0 { - return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND - } - pathp, err := windows.UTF16PtrFromString(path) - if err != nil { - return windows.InvalidHandle, err - } - var access uint32 - switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) { - case windows.O_RDONLY: - access = windows.GENERIC_READ - case windows.O_WRONLY: - access = windows.GENERIC_WRITE - case windows.O_RDWR: - access = windows.GENERIC_READ | windows.GENERIC_WRITE - } - if mode&windows.O_CREAT != 0 { - access |= windows.GENERIC_WRITE - } - if mode&windows.O_APPEND != 0 { - access &^= windows.GENERIC_WRITE - access |= windows.FILE_APPEND_DATA - } - sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE) - var sa *windows.SecurityAttributes - if mode&windows.O_CLOEXEC == 0 { - sa = makeInheritSa() - } - var createmode uint32 - switch { - case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL): - createmode = windows.CREATE_NEW - case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC): - createmode = windows.CREATE_ALWAYS - case mode&windows.O_CREAT == windows.O_CREAT: - createmode = windows.OPEN_ALWAYS - case mode&windows.O_TRUNC == windows.O_TRUNC: - createmode = windows.TRUNCATE_EXISTING - default: - createmode = windows.OPEN_EXISTING - } - // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang. - // https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx - const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN - h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0) - return h, e -} - -// Helpers for TempFileSequential -var rand uint32 -var randmu sync.Mutex - -func reseed() uint32 { - return uint32(time.Now().UnixNano() + int64(os.Getpid())) -} -func nextSuffix() string { - randmu.Lock() - r := rand - if r == 0 { - r = reseed() - } - r = r*1664525 + 1013904223 // constants from Numerical Recipes - rand = r - randmu.Unlock() - return strconv.Itoa(int(1e9 + r%1e9))[1:] -} - -// TempFileSequential is a copy of ioutil.TempFile, modified to use sequential -// file access. Below is the original comment from golang: -// TempFile creates a new temporary file in the directory dir -// with a name beginning with prefix, opens the file for reading -// and writing, and returns the resulting *os.File. -// If dir is the empty string, TempFile uses the default directory -// for temporary files (see os.TempDir). -// Multiple programs calling TempFile simultaneously -// will not choose the same file. The caller can use f.Name() -// to find the pathname of the file. 
It is the caller's responsibility -// to remove the file when no longer needed. -func TempFileSequential(dir, prefix string) (f *os.File, err error) { - if dir == "" { - dir = os.TempDir() - } - - nconflict := 0 - for i := 0; i < 10000; i++ { - name := filepath.Join(dir, prefix+nextSuffix()) - f, err = OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) - if os.IsExist(err) { - if nconflict++; nconflict > 10 { - randmu.Lock() - rand = reseed() - randmu.Unlock() - } - continue - } - break - } - return -} diff --git a/vendor/github.com/docker/docker/pkg/system/image_os.go b/vendor/github.com/docker/docker/pkg/system/image_os.go new file mode 100644 index 0000000000..e3de86be29 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/image_os.go @@ -0,0 +1,10 @@ +package system // import "github.com/docker/docker/pkg/system" +import ( + "runtime" + "strings" +) + +// IsOSSupported determines if an operating system is supported by the host. +func IsOSSupported(os string) bool { + return strings.EqualFold(runtime.GOOS, os) +} diff --git a/vendor/github.com/docker/docker/pkg/system/init_windows.go b/vendor/github.com/docker/docker/pkg/system/init_windows.go index a91288c60b..3c2a43ddbd 100644 --- a/vendor/github.com/docker/docker/pkg/system/init_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/init_windows.go @@ -1,29 +1,18 @@ package system // import "github.com/docker/docker/pkg/system" -import ( - "os" - - "github.com/sirupsen/logrus" -) - var ( - // containerdRuntimeSupported determines if ContainerD should be the runtime. - // As of March 2019, this is an experimental feature. + // containerdRuntimeSupported determines if containerd should be the runtime. containerdRuntimeSupported = false ) -// InitContainerdRuntime sets whether to use ContainerD for runtime -// on Windows. This is an experimental feature still in development, and -// also requires an environment variable to be set (so as not to turn the -// feature on from simply experimental which would also mean LCOW. -func InitContainerdRuntime(experimental bool, cdPath string) { - if experimental && len(cdPath) > 0 && len(os.Getenv("DOCKER_WINDOWS_CONTAINERD_RUNTIME")) > 0 { - logrus.Warnf("Using ContainerD runtime. This feature is experimental") +// InitContainerdRuntime sets whether to use containerd for runtime on Windows. +func InitContainerdRuntime(cdPath string) { + if len(cdPath) > 0 { containerdRuntimeSupported = true } } -// ContainerdRuntimeSupported returns true if the use of ContainerD runtime is supported. +// ContainerdRuntimeSupported returns true if the use of containerd runtime is supported. func ContainerdRuntimeSupported() bool { return containerdRuntimeSupported } diff --git a/vendor/github.com/docker/docker/pkg/system/lcow.go b/vendor/github.com/docker/docker/pkg/system/lcow.go deleted file mode 100644 index 4599a3f23c..0000000000 --- a/vendor/github.com/docker/docker/pkg/system/lcow.go +++ /dev/null @@ -1,49 +0,0 @@ -//go:build windows && !no_lcow -// +build windows,!no_lcow - -package system // import "github.com/docker/docker/pkg/system" - -import ( - "strings" - - "github.com/Microsoft/hcsshim/osversion" - specs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -var ( - // lcowSupported determines if Linux Containers on Windows are supported. 
- lcowSupported = false -) - -// InitLCOW sets whether LCOW is supported or not. Requires RS5+ -func InitLCOW(experimental bool) { - if experimental && osversion.Build() >= osversion.RS5 { - lcowSupported = true - } -} - -func LCOWSupported() bool { - return lcowSupported -} - -// ValidatePlatform determines if a platform structure is valid. -// TODO This is a temporary windows-only function, should be replaced by -// comparison of worker capabilities -func ValidatePlatform(platform specs.Platform) error { - if !IsOSSupported(platform.OS) { - return errors.Errorf("unsupported os %s", platform.OS) - } - return nil -} - -// IsOSSupported determines if an operating system is supported by the host -func IsOSSupported(os string) bool { - if strings.EqualFold("windows", os) { - return true - } - if LCOWSupported() && strings.EqualFold(os, "linux") { - return true - } - return false -} diff --git a/vendor/github.com/docker/docker/pkg/system/lcow_unsupported.go b/vendor/github.com/docker/docker/pkg/system/lcow_unsupported.go deleted file mode 100644 index daadef31d5..0000000000 --- a/vendor/github.com/docker/docker/pkg/system/lcow_unsupported.go +++ /dev/null @@ -1,29 +0,0 @@ -//go:build !windows || (windows && no_lcow) -// +build !windows windows,no_lcow - -package system // import "github.com/docker/docker/pkg/system" -import ( - "runtime" - "strings" - - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -// InitLCOW does nothing since LCOW is a windows only feature -func InitLCOW(_ bool) {} - -// LCOWSupported returns true if Linux containers on Windows are supported. -func LCOWSupported() bool { - return false -} - -// ValidatePlatform determines if a platform structure is valid. This function -// is used for LCOW, and is a no-op on non-windows platforms. -func ValidatePlatform(_ specs.Platform) error { - return nil -} - -// IsOSSupported determines if an operating system is supported by the host. -func IsOSSupported(os string) bool { - return strings.EqualFold(runtime.GOOS, os) -} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go index cd060eff24..02a7377c1f 100644 --- a/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go +++ b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go @@ -6,8 +6,6 @@ import ( "os" "strconv" "strings" - - units "github.com/docker/go-units" ) // ReadMemInfo retrieves memory statistics of the host system and returns a @@ -42,7 +40,8 @@ func parseMemInfo(reader io.Reader) (*MemInfo, error) { if err != nil { continue } - bytes := int64(size) * units.KiB + // Convert to KiB + bytes := int64(size) * 1024 switch parts[0] { case "MemTotal:": @@ -56,7 +55,6 @@ func parseMemInfo(reader io.Reader) (*MemInfo, error) { case "SwapFree:": meminfo.SwapFree = bytes } - } if memAvailable != -1 { meminfo.MemFree = memAvailable diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go b/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go index 6ed93f2fe2..124d2c502d 100644 --- a/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go @@ -27,7 +27,7 @@ type memorystatusex struct { } // ReadMemInfo retrieves memory statistics of the host system and returns a -// MemInfo type. +// MemInfo type. 
func ReadMemInfo() (*MemInfo, error) { msi := &memorystatusex{ dwLength: 64, diff --git a/vendor/github.com/docker/docker/pkg/system/path.go b/vendor/github.com/docker/docker/pkg/system/path.go index 64e892289a..818b20efee 100644 --- a/vendor/github.com/docker/docker/pkg/system/path.go +++ b/vendor/github.com/docker/docker/pkg/system/path.go @@ -1,28 +1,18 @@ package system // import "github.com/docker/docker/pkg/system" -import ( - "fmt" - "path/filepath" - "runtime" - "strings" -) - const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" // DefaultPathEnv is unix style list of directories to search for // executables. Each directory is separated from the next by a colon // ':' character . +// For Windows containers, an empty string is returned as the default +// path will be set by the container, and Docker has no context of what the +// default path should be. func DefaultPathEnv(os string) string { - if runtime.GOOS == "windows" { - if os != runtime.GOOS { - return defaultUnixPathEnv - } - // Deliberately empty on Windows containers on Windows as the default path will be set by - // the container. Docker has no context of what the default path should be. + if os == "windows" { return "" } return defaultUnixPathEnv - } // PathVerifier defines the subset of a PathDriver that CheckSystemDriveAndRemoveDriveLetter @@ -47,18 +37,5 @@ type PathVerifier interface { // /a --> \a // d:\ --> Fail func CheckSystemDriveAndRemoveDriveLetter(path string, driver PathVerifier) (string, error) { - if runtime.GOOS != "windows" || LCOWSupported() { - return path, nil - } - - if len(path) == 2 && string(path[1]) == ":" { - return "", fmt.Errorf("No relative path specified in %q", path) - } - if !driver.IsAbs(path) || len(path) < 2 { - return filepath.FromSlash(path), nil - } - if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { - return "", fmt.Errorf("The specified path is not on the system drive (C:)") - } - return filepath.FromSlash(path[2:]), nil + return checkSystemDriveAndRemoveDriveLetter(path, driver) } diff --git a/vendor/github.com/docker/docker/pkg/system/path_unix.go b/vendor/github.com/docker/docker/pkg/system/path_unix.go index 2c85371b5e..197a37a219 100644 --- a/vendor/github.com/docker/docker/pkg/system/path_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/path_unix.go @@ -9,3 +9,9 @@ package system // import "github.com/docker/docker/pkg/system" func GetLongPathName(path string) (string, error) { return path, nil } + +// checkSystemDriveAndRemoveDriveLetter is the non-Windows implementation +// of CheckSystemDriveAndRemoveDriveLetter +func checkSystemDriveAndRemoveDriveLetter(path string, driver PathVerifier) (string, error) { + return path, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/path_windows.go b/vendor/github.com/docker/docker/pkg/system/path_windows.go index 22a56136c8..7d375b0ddc 100644 --- a/vendor/github.com/docker/docker/pkg/system/path_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/path_windows.go @@ -1,6 +1,12 @@ package system // import "github.com/docker/docker/pkg/system" -import "golang.org/x/sys/windows" +import ( + "fmt" + "path/filepath" + "strings" + + "golang.org/x/sys/windows" +) // GetLongPathName converts Windows short pathnames to full pathnames. // For example C:\Users\ADMIN~1 --> C:\Users\Administrator. 
@@ -25,3 +31,18 @@ func GetLongPathName(path string) (string, error) { } return windows.UTF16ToString(b), nil } + +// checkSystemDriveAndRemoveDriveLetter is the Windows implementation +// of CheckSystemDriveAndRemoveDriveLetter +func checkSystemDriveAndRemoveDriveLetter(path string, driver PathVerifier) (string, error) { + if len(path) == 2 && string(path[1]) == ":" { + return "", fmt.Errorf("No relative path specified in %q", path) + } + if !driver.IsAbs(path) || len(path) < 2 { + return filepath.FromSlash(path), nil + } + if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { + return "", fmt.Errorf("The specified path is not on the system drive (C:)") + } + return filepath.FromSlash(path[2:]), nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/process_unix.go b/vendor/github.com/docker/docker/pkg/system/process_unix.go index 145689b88a..1c2c6a3096 100644 --- a/vendor/github.com/docker/docker/pkg/system/process_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/process_unix.go @@ -5,7 +5,7 @@ package system // import "github.com/docker/docker/pkg/system" import ( "fmt" - "io/ioutil" + "os" "strings" "syscall" @@ -31,8 +31,9 @@ func KillProcess(pid int) { // http://man7.org/linux/man-pages/man5/proc.5.html func IsProcessZombie(pid int) (bool, error) { statPath := fmt.Sprintf("/proc/%d/stat", pid) - dataBytes, err := ioutil.ReadFile(statPath) + dataBytes, err := os.ReadFile(statPath) if err != nil { + // TODO(thaJeztah) should we ignore os.IsNotExist() here? ("/proc//stat" will be gone if the process exited) return false, err } data := string(dataBytes) diff --git a/vendor/github.com/docker/docker/pkg/system/rm.go b/vendor/github.com/docker/docker/pkg/system/rm.go deleted file mode 100644 index f2d81597c9..0000000000 --- a/vendor/github.com/docker/docker/pkg/system/rm.go +++ /dev/null @@ -1,79 +0,0 @@ -//go:build !darwin && !windows -// +build !darwin,!windows - -package system // import "github.com/docker/docker/pkg/system" - -import ( - "os" - "syscall" - "time" - - "github.com/moby/sys/mount" - "github.com/pkg/errors" -) - -// EnsureRemoveAll wraps `os.RemoveAll` to check for specific errors that can -// often be remedied. -// Only use `EnsureRemoveAll` if you really want to make every effort to remove -// a directory. -// -// Because of the way `os.Remove` (and by extension `os.RemoveAll`) works, there -// can be a race between reading directory entries and then actually attempting -// to remove everything in the directory. -// These types of errors do not need to be returned since it's ok for the dir to -// be gone we can just retry the remove operation. -// -// This should not return a `os.ErrNotExist` kind of error under any circumstances -func EnsureRemoveAll(dir string) error { - notExistErr := make(map[string]bool) - - // track retries - exitOnErr := make(map[string]int) - maxRetry := 50 - - // Attempt to unmount anything beneath this dir first - mount.RecursiveUnmount(dir) - - for { - err := os.RemoveAll(dir) - if err == nil { - return nil - } - - pe, ok := err.(*os.PathError) - if !ok { - return err - } - - if os.IsNotExist(err) { - if notExistErr[pe.Path] { - return err - } - notExistErr[pe.Path] = true - - // There is a race where some subdir can be removed but after the parent - // dir entries have been read. 
- // So the path could be from `os.Remove(subdir)` - // If the reported non-existent path is not the passed in `dir` we - // should just retry, but otherwise return with no error. - if pe.Path == dir { - return nil - } - continue - } - - if pe.Err != syscall.EBUSY { - return err - } - - if e := mount.Unmount(pe.Path); e != nil { - return errors.Wrapf(e, "error while removing %s", dir) - } - - if exitOnErr[pe.Path] == maxRetry { - return err - } - exitOnErr[pe.Path]++ - time.Sleep(100 * time.Millisecond) - } -} diff --git a/vendor/github.com/docker/docker/pkg/system/rm_windows.go b/vendor/github.com/docker/docker/pkg/system/rm_windows.go deleted file mode 100644 index ed9c5dcb8a..0000000000 --- a/vendor/github.com/docker/docker/pkg/system/rm_windows.go +++ /dev/null @@ -1,6 +0,0 @@ -package system - -import "os" - -// EnsureRemoveAll is an alias to os.RemoveAll on Windows -var EnsureRemoveAll = os.RemoveAll diff --git a/vendor/github.com/docker/docker/pkg/system/stat_windows.go b/vendor/github.com/docker/docker/pkg/system/stat_windows.go index b2456cb887..0ff3af2fa1 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_windows.go @@ -20,12 +20,12 @@ func (s StatT) Size() int64 { // Mode returns file's permission mode. func (s StatT) Mode() os.FileMode { - return os.FileMode(s.mode) + return s.mode } // Mtim returns file's last modification time. func (s StatT) Mtim() time.Time { - return time.Time(s.mtim) + return s.mtim } // Stat takes a path to a file and returns diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_unix.go b/vendor/github.com/docker/docker/pkg/system/syscall_unix.go deleted file mode 100644 index 7c90bffaa5..0000000000 --- a/vendor/github.com/docker/docker/pkg/system/syscall_unix.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build linux || freebsd -// +build linux freebsd - -package system // import "github.com/docker/docker/pkg/system" - -import "golang.org/x/sys/unix" - -// Unmount is a platform-specific helper function to call -// the unmount syscall. 
-func Unmount(dest string) error { - return unix.Unmount(dest, 0) -} diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go deleted file mode 100644 index 1588aa3ef9..0000000000 --- a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go +++ /dev/null @@ -1,136 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import ( - "syscall" - "unsafe" - - "github.com/Microsoft/hcsshim/osversion" - "github.com/sirupsen/logrus" - "golang.org/x/sys/windows" -) - -const ( - OWNER_SECURITY_INFORMATION = windows.OWNER_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.OWNER_SECURITY_INFORMATION - GROUP_SECURITY_INFORMATION = windows.GROUP_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.GROUP_SECURITY_INFORMATION - DACL_SECURITY_INFORMATION = windows.DACL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.DACL_SECURITY_INFORMATION - SACL_SECURITY_INFORMATION = windows.SACL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.SACL_SECURITY_INFORMATION - LABEL_SECURITY_INFORMATION = windows.LABEL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.LABEL_SECURITY_INFORMATION - ATTRIBUTE_SECURITY_INFORMATION = windows.ATTRIBUTE_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.ATTRIBUTE_SECURITY_INFORMATION - SCOPE_SECURITY_INFORMATION = windows.SCOPE_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.SCOPE_SECURITY_INFORMATION - PROCESS_TRUST_LABEL_SECURITY_INFORMATION = 0x00000080 - ACCESS_FILTER_SECURITY_INFORMATION = 0x00000100 - BACKUP_SECURITY_INFORMATION = windows.BACKUP_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.BACKUP_SECURITY_INFORMATION - PROTECTED_DACL_SECURITY_INFORMATION = windows.PROTECTED_DACL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.PROTECTED_DACL_SECURITY_INFORMATION - PROTECTED_SACL_SECURITY_INFORMATION = windows.PROTECTED_SACL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.PROTECTED_SACL_SECURITY_INFORMATION - UNPROTECTED_DACL_SECURITY_INFORMATION = windows.UNPROTECTED_DACL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.UNPROTECTED_DACL_SECURITY_INFORMATION - UNPROTECTED_SACL_SECURITY_INFORMATION = windows.UNPROTECTED_SACL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.UNPROTECTED_SACL_SECURITY_INFORMATION -) - -const ( - SE_UNKNOWN_OBJECT_TYPE = windows.SE_UNKNOWN_OBJECT_TYPE // Deprecated: use golang.org/x/sys/windows.SE_UNKNOWN_OBJECT_TYPE - SE_FILE_OBJECT = windows.SE_FILE_OBJECT // Deprecated: use golang.org/x/sys/windows.SE_FILE_OBJECT - SE_SERVICE = windows.SE_SERVICE // Deprecated: use golang.org/x/sys/windows.SE_SERVICE - SE_PRINTER = windows.SE_PRINTER // Deprecated: use golang.org/x/sys/windows.SE_PRINTER - SE_REGISTRY_KEY = windows.SE_REGISTRY_KEY // Deprecated: use golang.org/x/sys/windows.SE_REGISTRY_KEY - SE_LMSHARE = windows.SE_LMSHARE // Deprecated: use golang.org/x/sys/windows.SE_LMSHARE - SE_KERNEL_OBJECT = windows.SE_KERNEL_OBJECT // Deprecated: use golang.org/x/sys/windows.SE_KERNEL_OBJECT - SE_WINDOW_OBJECT = windows.SE_WINDOW_OBJECT // Deprecated: use golang.org/x/sys/windows.SE_WINDOW_OBJECT - SE_DS_OBJECT = windows.SE_DS_OBJECT // Deprecated: use golang.org/x/sys/windows.SE_DS_OBJECT - SE_DS_OBJECT_ALL = windows.SE_DS_OBJECT_ALL // Deprecated: use golang.org/x/sys/windows.SE_DS_OBJECT_ALL - 
SE_PROVIDER_DEFINED_OBJECT = windows.SE_PROVIDER_DEFINED_OBJECT // Deprecated: use golang.org/x/sys/windows.SE_PROVIDER_DEFINED_OBJECT - SE_WMIGUID_OBJECT = windows.SE_WMIGUID_OBJECT // Deprecated: use golang.org/x/sys/windows.SE_WMIGUID_OBJECT - SE_REGISTRY_WOW64_32KEY = windows.SE_REGISTRY_WOW64_32KEY // Deprecated: use golang.org/x/sys/windows.SE_REGISTRY_WOW64_32KEY -) - -const ( - SeTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege" -) - -const ( - ContainerAdministratorSidString = "S-1-5-93-2-1" - ContainerUserSidString = "S-1-5-93-2-2" -) - -var ( - ntuserApiset = windows.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") - modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") - procGetVersionExW = modkernel32.NewProc("GetVersionExW") - procSetNamedSecurityInfo = modadvapi32.NewProc("SetNamedSecurityInfoW") - procGetSecurityDescriptorDacl = modadvapi32.NewProc("GetSecurityDescriptorDacl") -) - -// OSVersion is a wrapper for Windows version information -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx -type OSVersion = osversion.OSVersion - -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx -// TODO: use golang.org/x/sys/windows.OsVersionInfoEx (needs OSVersionInfoSize to be exported) -type osVersionInfoEx struct { - OSVersionInfoSize uint32 - MajorVersion uint32 - MinorVersion uint32 - BuildNumber uint32 - PlatformID uint32 - CSDVersion [128]uint16 - ServicePackMajor uint16 - ServicePackMinor uint16 - SuiteMask uint16 - ProductType byte - Reserve byte -} - -// GetOSVersion gets the operating system version on Windows. Note that -// dockerd.exe must be manifested to get the correct version information. -// Deprecated: use github.com/Microsoft/hcsshim/osversion.Get() instead -func GetOSVersion() OSVersion { - return osversion.Get() -} - -// IsWindowsClient returns true if the SKU is client -func IsWindowsClient() bool { - osviex := &osVersionInfoEx{OSVersionInfoSize: 284} - r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex))) - if r1 == 0 { - logrus.Warnf("GetVersionExW failed - assuming server SKU: %v", err) - return false - } - const verNTWorkstation = 0x00000001 - return osviex.ProductType == verNTWorkstation -} - -// Unmount is a platform-specific helper function to call -// the unmount syscall. Not supported on Windows -func Unmount(_ string) error { - return nil -} - -// HasWin32KSupport determines whether containers that depend on win32k can -// run on this machine. Win32k is the driver used to implement windowing. -func HasWin32KSupport() bool { - // For now, check for ntuser API support on the host. In the future, a host - // may support win32k in containers even if the host does not support ntuser - // APIs. 
- return ntuserApiset.Load() == nil -} - -// Deprecated: use golang.org/x/sys/windows.SetNamedSecurityInfo() -func SetNamedSecurityInfo(objectName *uint16, objectType uint32, securityInformation uint32, sidOwner *windows.SID, sidGroup *windows.SID, dacl *byte, sacl *byte) (result error) { - r0, _, _ := syscall.Syscall9(procSetNamedSecurityInfo.Addr(), 7, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(sidOwner)), uintptr(unsafe.Pointer(sidGroup)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) - if r0 != 0 { - result = syscall.Errno(r0) - } - return -} - -// Deprecated: uses golang.org/x/sys/windows.SecurityDescriptorFromString() and golang.org/x/sys/windows.SECURITY_DESCRIPTOR.DACL() -func GetSecurityDescriptorDacl(securityDescriptor *byte, daclPresent *uint32, dacl **byte, daclDefaulted *uint32) (result error) { - r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(securityDescriptor)), uintptr(unsafe.Pointer(daclPresent)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclDefaulted)), 0, 0) - if r1 == 0 { - if e1 != 0 { - result = e1 - } else { - result = syscall.EINVAL - } - } - return -} diff --git a/vendor/github.com/docker/docker/pkg/system/umask.go b/vendor/github.com/docker/docker/pkg/system/umask.go deleted file mode 100644 index d4a15cbedc..0000000000 --- a/vendor/github.com/docker/docker/pkg/system/umask.go +++ /dev/null @@ -1,14 +0,0 @@ -//go:build !windows -// +build !windows - -package system // import "github.com/docker/docker/pkg/system" - -import ( - "golang.org/x/sys/unix" -) - -// Umask sets current process's file mode creation mask to newmask -// and returns oldmask. -func Umask(newmask int) (oldmask int, err error) { - return unix.Umask(newmask), nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/umask_windows.go b/vendor/github.com/docker/docker/pkg/system/umask_windows.go deleted file mode 100644 index fc62388c38..0000000000 --- a/vendor/github.com/docker/docker/pkg/system/umask_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -// Umask is not supported on the windows platform. -func Umask(newmask int) (oldmask int, err error) { - // should not be called on cli code path - return 0, ErrNotSupportedPlatform -} diff --git a/vendor/github.com/moby/patternmatcher/LICENSE b/vendor/github.com/moby/patternmatcher/LICENSE new file mode 100644 index 0000000000..6d8d58fb67 --- /dev/null +++ b/vendor/github.com/moby/patternmatcher/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2018 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/moby/patternmatcher/NOTICE b/vendor/github.com/moby/patternmatcher/NOTICE new file mode 100644 index 0000000000..e5154640fe --- /dev/null +++ b/vendor/github.com/moby/patternmatcher/NOTICE @@ -0,0 +1,16 @@ +Docker +Copyright 2012-2017 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. 
+ +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/moby/patternmatcher/patternmatcher.go b/vendor/github.com/moby/patternmatcher/patternmatcher.go new file mode 100644 index 0000000000..37a1a59ac4 --- /dev/null +++ b/vendor/github.com/moby/patternmatcher/patternmatcher.go @@ -0,0 +1,474 @@ +package patternmatcher + +import ( + "errors" + "os" + "path/filepath" + "regexp" + "strings" + "text/scanner" + "unicode/utf8" +) + +// escapeBytes is a bitmap used to check whether a character should be escaped when creating the regex. +var escapeBytes [8]byte + +// shouldEscape reports whether a rune should be escaped as part of the regex. +// +// This only includes characters that require escaping in regex but are also NOT valid filepath pattern characters. +// Additionally, '\' is not excluded because there is specific logic to properly handle this, as it's a path separator +// on Windows. +// +// Adapted from regexp::QuoteMeta in go stdlib. +// See https://cs.opensource.google/go/go/+/refs/tags/go1.17.2:src/regexp/regexp.go;l=703-715;drc=refs%2Ftags%2Fgo1.17.2 +func shouldEscape(b rune) bool { + return b < utf8.RuneSelf && escapeBytes[b%8]&(1<<(b/8)) != 0 +} + +func init() { + for _, b := range []byte(`.+()|{}$`) { + escapeBytes[b%8] |= 1 << (b / 8) + } +} + +// PatternMatcher allows checking paths against a list of patterns +type PatternMatcher struct { + patterns []*Pattern + exclusions bool +} + +// New creates a new matcher object for specific patterns that can +// be used later to match against patterns against paths +func New(patterns []string) (*PatternMatcher, error) { + pm := &PatternMatcher{ + patterns: make([]*Pattern, 0, len(patterns)), + } + for _, p := range patterns { + // Eliminate leading and trailing whitespace. + p = strings.TrimSpace(p) + if p == "" { + continue + } + p = filepath.Clean(p) + newp := &Pattern{} + if p[0] == '!' { + if len(p) == 1 { + return nil, errors.New("illegal exclusion pattern: \"!\"") + } + newp.exclusion = true + p = p[1:] + pm.exclusions = true + } + // Do some syntax checking on the pattern. + // filepath's Match() has some really weird rules that are inconsistent + // so instead of trying to dup their logic, just call Match() for its + // error state and if there is an error in the pattern return it. + // If this becomes an issue we can remove this since its really only + // needed in the error (syntax) case - which isn't really critical. + if _, err := filepath.Match(p, "."); err != nil { + return nil, err + } + newp.cleanedPattern = p + newp.dirs = strings.Split(p, string(os.PathSeparator)) + pm.patterns = append(pm.patterns, newp) + } + return pm, nil +} + +// Matches returns true if "file" matches any of the patterns +// and isn't excluded by any of the subsequent patterns. +// +// The "file" argument should be a slash-delimited path. +// +// Matches is not safe to call concurrently. +// +// Deprecated: This implementation is buggy (it only checks a single parent dir +// against the pattern) and will be removed soon. Use either +// MatchesOrParentMatches or MatchesUsingParentResults instead. 
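A minimal usage sketch of the matcher API defined in this file (illustrative only; the patterns and paths are made-up examples, and exclusions use the same "!" prefix as .dockerignore):

package main

import (
	"fmt"

	"github.com/moby/patternmatcher"
)

func main() {
	// Include everything under vendor/, but carve out vendor/keep/*.go again.
	pm, err := patternmatcher.New([]string{"vendor/**", "!vendor/keep/*.go"})
	if err != nil {
		panic(err)
	}

	// Expected: vendor/a/b.go true, vendor/keep/x.go false, main.go false.
	for _, f := range []string{"vendor/a/b.go", "vendor/keep/x.go", "main.go"} {
		// MatchesOrParentMatches also checks parent directories against each pattern.
		ok, err := pm.MatchesOrParentMatches(f)
		fmt.Println(f, ok, err)
	}
}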
+func (pm *PatternMatcher) Matches(file string) (bool, error) { + matched := false + file = filepath.FromSlash(file) + parentPath := filepath.Dir(file) + parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) + + for _, pattern := range pm.patterns { + // Skip evaluation if this is an inclusion and the filename + // already matched the pattern, or it's an exclusion and it has + // not matched the pattern yet. + if pattern.exclusion != matched { + continue + } + + match, err := pattern.match(file) + if err != nil { + return false, err + } + + if !match && parentPath != "." { + // Check to see if the pattern matches one of our parent dirs. + if len(pattern.dirs) <= len(parentPathDirs) { + match, _ = pattern.match(strings.Join(parentPathDirs[:len(pattern.dirs)], string(os.PathSeparator))) + } + } + + if match { + matched = !pattern.exclusion + } + } + + return matched, nil +} + +// MatchesOrParentMatches returns true if "file" matches any of the patterns +// and isn't excluded by any of the subsequent patterns. +// +// The "file" argument should be a slash-delimited path. +// +// Matches is not safe to call concurrently. +func (pm *PatternMatcher) MatchesOrParentMatches(file string) (bool, error) { + matched := false + file = filepath.FromSlash(file) + parentPath := filepath.Dir(file) + parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) + + for _, pattern := range pm.patterns { + // Skip evaluation if this is an inclusion and the filename + // already matched the pattern, or it's an exclusion and it has + // not matched the pattern yet. + if pattern.exclusion != matched { + continue + } + + match, err := pattern.match(file) + if err != nil { + return false, err + } + + if !match && parentPath != "." { + // Check to see if the pattern matches one of our parent dirs. + for i := range parentPathDirs { + match, _ = pattern.match(strings.Join(parentPathDirs[:i+1], string(os.PathSeparator))) + if match { + break + } + } + } + + if match { + matched = !pattern.exclusion + } + } + + return matched, nil +} + +// MatchesUsingParentResult returns true if "file" matches any of the patterns +// and isn't excluded by any of the subsequent patterns. The functionality is +// the same as Matches, but as an optimization, the caller keeps track of +// whether the parent directory matched. +// +// The "file" argument should be a slash-delimited path. +// +// MatchesUsingParentResult is not safe to call concurrently. +// +// Deprecated: this function does behave correctly in some cases (see +// https://github.com/docker/buildx/issues/850). +// +// Use MatchesUsingParentResults instead. +func (pm *PatternMatcher) MatchesUsingParentResult(file string, parentMatched bool) (bool, error) { + matched := parentMatched + file = filepath.FromSlash(file) + + for _, pattern := range pm.patterns { + // Skip evaluation if this is an inclusion and the filename + // already matched the pattern, or it's an exclusion and it has + // not matched the pattern yet. + if pattern.exclusion != matched { + continue + } + + match, err := pattern.match(file) + if err != nil { + return false, err + } + + if match { + matched = !pattern.exclusion + } + } + return matched, nil +} + +// MatchInfo tracks information about parent dir matches while traversing a +// filesystem. +type MatchInfo struct { + parentMatched []bool +} + +// MatchesUsingParentResults returns true if "file" matches any of the patterns +// and isn't excluded by any of the subsequent patterns. 
The functionality is +// the same as Matches, but as an optimization, the caller passes in +// intermediate results from matching the parent directory. +// +// The "file" argument should be a slash-delimited path. +// +// MatchesUsingParentResults is not safe to call concurrently. +func (pm *PatternMatcher) MatchesUsingParentResults(file string, parentMatchInfo MatchInfo) (bool, MatchInfo, error) { + parentMatched := parentMatchInfo.parentMatched + if len(parentMatched) != 0 && len(parentMatched) != len(pm.patterns) { + return false, MatchInfo{}, errors.New("wrong number of values in parentMatched") + } + + file = filepath.FromSlash(file) + matched := false + + matchInfo := MatchInfo{ + parentMatched: make([]bool, len(pm.patterns)), + } + for i, pattern := range pm.patterns { + match := false + // If the parent matched this pattern, we don't need to recheck. + if len(parentMatched) != 0 { + match = parentMatched[i] + } + + if !match { + // Skip evaluation if this is an inclusion and the filename + // already matched the pattern, or it's an exclusion and it has + // not matched the pattern yet. + if pattern.exclusion != matched { + continue + } + + var err error + match, err = pattern.match(file) + if err != nil { + return false, matchInfo, err + } + + // If the zero value of MatchInfo was passed in, we don't have + // any information about the parent dir's match results, and we + // apply the same logic as MatchesOrParentMatches. + if !match && len(parentMatched) == 0 { + if parentPath := filepath.Dir(file); parentPath != "." { + parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) + // Check to see if the pattern matches one of our parent dirs. + for i := range parentPathDirs { + match, _ = pattern.match(strings.Join(parentPathDirs[:i+1], string(os.PathSeparator))) + if match { + break + } + } + } + } + } + matchInfo.parentMatched[i] = match + + if match { + matched = !pattern.exclusion + } + } + return matched, matchInfo, nil +} + +// Exclusions returns true if any of the patterns define exclusions +func (pm *PatternMatcher) Exclusions() bool { + return pm.exclusions +} + +// Patterns returns array of active patterns +func (pm *PatternMatcher) Patterns() []*Pattern { + return pm.patterns +} + +// Pattern defines a single regexp used to filter file paths. 
+type Pattern struct { + matchType matchType + cleanedPattern string + dirs []string + regexp *regexp.Regexp + exclusion bool +} + +type matchType int + +const ( + unknownMatch matchType = iota + exactMatch + prefixMatch + suffixMatch + regexpMatch +) + +func (p *Pattern) String() string { + return p.cleanedPattern +} + +// Exclusion returns true if this pattern defines exclusion +func (p *Pattern) Exclusion() bool { + return p.exclusion +} + +func (p *Pattern) match(path string) (bool, error) { + if p.matchType == unknownMatch { + if err := p.compile(string(os.PathSeparator)); err != nil { + return false, filepath.ErrBadPattern + } + } + + switch p.matchType { + case exactMatch: + return path == p.cleanedPattern, nil + case prefixMatch: + // strip trailing ** + return strings.HasPrefix(path, p.cleanedPattern[:len(p.cleanedPattern)-2]), nil + case suffixMatch: + // strip leading ** + suffix := p.cleanedPattern[2:] + if strings.HasSuffix(path, suffix) { + return true, nil + } + // **/foo matches "foo" + return suffix[0] == os.PathSeparator && path == suffix[1:], nil + case regexpMatch: + return p.regexp.MatchString(path), nil + } + + return false, nil +} + +func (p *Pattern) compile(sl string) error { + regStr := "^" + pattern := p.cleanedPattern + // Go through the pattern and convert it to a regexp. + // We use a scanner so we can support utf-8 chars. + var scan scanner.Scanner + scan.Init(strings.NewReader(pattern)) + + escSL := sl + if sl == `\` { + escSL += `\` + } + + p.matchType = exactMatch + for i := 0; scan.Peek() != scanner.EOF; i++ { + ch := scan.Next() + + if ch == '*' { + if scan.Peek() == '*' { + // is some flavor of "**" + scan.Next() + + // Treat **/ as ** so eat the "/" + if string(scan.Peek()) == sl { + scan.Next() + } + + if scan.Peek() == scanner.EOF { + // is "**EOF" - to align with .gitignore just accept all + if p.matchType == exactMatch { + p.matchType = prefixMatch + } else { + regStr += ".*" + p.matchType = regexpMatch + } + } else { + // is "**" + // Note that this allows for any # of /'s (even 0) because + // the .* will eat everything, even /'s + regStr += "(.*" + escSL + ")?" + p.matchType = regexpMatch + } + + if i == 0 { + p.matchType = suffixMatch + } + } else { + // is "*" so map it to anything but "/" + regStr += "[^" + escSL + "]*" + p.matchType = regexpMatch + } + } else if ch == '?' { + // "?" is any char except "/" + regStr += "[^" + escSL + "]" + p.matchType = regexpMatch + } else if shouldEscape(ch) { + // Escape some regexp special chars that have no meaning + // in golang's filepath.Match + regStr += `\` + string(ch) + } else if ch == '\\' { + // escape next char. Note that a trailing \ in the pattern + // will be left alone (but need to escape it) + if sl == `\` { + // On windows map "\" to "\\", meaning an escaped backslash, + // and then just continue because filepath.Match on + // Windows doesn't allow escaping at all + regStr += escSL + continue + } + if scan.Peek() != scanner.EOF { + regStr += `\` + string(scan.Next()) + p.matchType = regexpMatch + } else { + regStr += `\` + } + } else if ch == '[' || ch == ']' { + regStr += string(ch) + p.matchType = regexpMatch + } else { + regStr += string(ch) + } + } + + if p.matchType != regexpMatch { + return nil + } + + regStr += "$" + + re, err := regexp.Compile(regStr) + if err != nil { + return err + } + + p.regexp = re + p.matchType = regexpMatch + return nil +} + +// Matches returns true if file matches any of the patterns +// and isn't excluded by any of the subsequent patterns. 
+// +// This implementation is buggy (it only checks a single parent dir against the +// pattern) and will be removed soon. Use MatchesOrParentMatches instead. +func Matches(file string, patterns []string) (bool, error) { + pm, err := New(patterns) + if err != nil { + return false, err + } + file = filepath.Clean(file) + + if file == "." { + // Don't let them exclude everything, kind of silly. + return false, nil + } + + return pm.Matches(file) +} + +// MatchesOrParentMatches returns true if file matches any of the patterns +// and isn't excluded by any of the subsequent patterns. +func MatchesOrParentMatches(file string, patterns []string) (bool, error) { + pm, err := New(patterns) + if err != nil { + return false, err + } + file = filepath.Clean(file) + + if file == "." { + // Don't let them exclude everything, kind of silly. + return false, nil + } + + return pm.MatchesOrParentMatches(file) +} diff --git a/vendor/github.com/moby/sys/mount/doc.go b/vendor/github.com/moby/sys/mount/doc.go deleted file mode 100644 index 86c2e01bbd..0000000000 --- a/vendor/github.com/moby/sys/mount/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Package mount provides a set of functions to mount and unmount mounts. -// -// Currently it supports Linux. For historical reasons, there is also some support for FreeBSD. -package mount diff --git a/vendor/github.com/moby/sys/mount/flags_bsd.go b/vendor/github.com/moby/sys/mount/flags_bsd.go deleted file mode 100644 index a7f8a71957..0000000000 --- a/vendor/github.com/moby/sys/mount/flags_bsd.go +++ /dev/null @@ -1,46 +0,0 @@ -//go:build freebsd || openbsd -// +build freebsd openbsd - -package mount - -import "golang.org/x/sys/unix" - -const ( - // RDONLY will mount the filesystem as read-only. - RDONLY = unix.MNT_RDONLY - - // NOSUID will not allow set-user-identifier or set-group-identifier bits to - // take effect. - NOSUID = unix.MNT_NOSUID - - // NOEXEC will not allow execution of any binaries on the mounted file system. - NOEXEC = unix.MNT_NOEXEC - - // SYNCHRONOUS will allow any I/O to the file system to be done synchronously. - SYNCHRONOUS = unix.MNT_SYNCHRONOUS - - // NOATIME will not update the file access time when reading from a file. - NOATIME = unix.MNT_NOATIME -) - -// These flags are unsupported. -const ( - BIND = 0 - DIRSYNC = 0 - MANDLOCK = 0 - NODEV = 0 - NODIRATIME = 0 - UNBINDABLE = 0 - RUNBINDABLE = 0 - PRIVATE = 0 - RPRIVATE = 0 - SHARED = 0 - RSHARED = 0 - SLAVE = 0 - RSLAVE = 0 - RBIND = 0 - RELATIME = 0 - REMOUNT = 0 - STRICTATIME = 0 - mntDetach = 0 -) diff --git a/vendor/github.com/moby/sys/mount/flags_linux.go b/vendor/github.com/moby/sys/mount/flags_linux.go deleted file mode 100644 index 0425d0dd63..0000000000 --- a/vendor/github.com/moby/sys/mount/flags_linux.go +++ /dev/null @@ -1,87 +0,0 @@ -package mount - -import ( - "golang.org/x/sys/unix" -) - -const ( - // RDONLY will mount the file system read-only. - RDONLY = unix.MS_RDONLY - - // NOSUID will not allow set-user-identifier or set-group-identifier bits to - // take effect. - NOSUID = unix.MS_NOSUID - - // NODEV will not interpret character or block special devices on the file - // system. - NODEV = unix.MS_NODEV - - // NOEXEC will not allow execution of any binaries on the mounted file system. - NOEXEC = unix.MS_NOEXEC - - // SYNCHRONOUS will allow I/O to the file system to be done synchronously. 
- SYNCHRONOUS = unix.MS_SYNCHRONOUS - - // DIRSYNC will force all directory updates within the file system to be done - // synchronously. This affects the following system calls: create, link, - // unlink, symlink, mkdir, rmdir, mknod and rename. - DIRSYNC = unix.MS_DIRSYNC - - // REMOUNT will attempt to remount an already-mounted file system. This is - // commonly used to change the mount flags for a file system, especially to - // make a readonly file system writeable. It does not change device or mount - // point. - REMOUNT = unix.MS_REMOUNT - - // MANDLOCK will force mandatory locks on a filesystem. - MANDLOCK = unix.MS_MANDLOCK - - // NOATIME will not update the file access time when reading from a file. - NOATIME = unix.MS_NOATIME - - // NODIRATIME will not update the directory access time. - NODIRATIME = unix.MS_NODIRATIME - - // BIND remounts a subtree somewhere else. - BIND = unix.MS_BIND - - // RBIND remounts a subtree and all possible submounts somewhere else. - RBIND = unix.MS_BIND | unix.MS_REC - - // UNBINDABLE creates a mount which cannot be cloned through a bind operation. - UNBINDABLE = unix.MS_UNBINDABLE - - // RUNBINDABLE marks the entire mount tree as UNBINDABLE. - RUNBINDABLE = unix.MS_UNBINDABLE | unix.MS_REC - - // PRIVATE creates a mount which carries no propagation abilities. - PRIVATE = unix.MS_PRIVATE - - // RPRIVATE marks the entire mount tree as PRIVATE. - RPRIVATE = unix.MS_PRIVATE | unix.MS_REC - - // SLAVE creates a mount which receives propagation from its master, but not - // vice versa. - SLAVE = unix.MS_SLAVE - - // RSLAVE marks the entire mount tree as SLAVE. - RSLAVE = unix.MS_SLAVE | unix.MS_REC - - // SHARED creates a mount which provides the ability to create mirrors of - // that mount such that mounts and unmounts within any of the mirrors - // propagate to the other mirrors. - SHARED = unix.MS_SHARED - - // RSHARED marks the entire mount tree as SHARED. - RSHARED = unix.MS_SHARED | unix.MS_REC - - // RELATIME updates inode access times relative to modify or change time. - RELATIME = unix.MS_RELATIME - - // STRICTATIME allows to explicitly request full atime updates. This makes - // it possible for the kernel to default to relatime or noatime but still - // allow userspace to override it. 
- STRICTATIME = unix.MS_STRICTATIME - - mntDetach = unix.MNT_DETACH -) diff --git a/vendor/github.com/moby/sys/mount/flags_unix.go b/vendor/github.com/moby/sys/mount/flags_unix.go deleted file mode 100644 index 19fa61fcca..0000000000 --- a/vendor/github.com/moby/sys/mount/flags_unix.go +++ /dev/null @@ -1,140 +0,0 @@ -//go:build !darwin && !windows -// +build !darwin,!windows - -package mount - -import ( - "fmt" - "strings" -) - -var flags = map[string]struct { - clear bool - flag int -}{ - "defaults": {false, 0}, - "ro": {false, RDONLY}, - "rw": {true, RDONLY}, - "suid": {true, NOSUID}, - "nosuid": {false, NOSUID}, - "dev": {true, NODEV}, - "nodev": {false, NODEV}, - "exec": {true, NOEXEC}, - "noexec": {false, NOEXEC}, - "sync": {false, SYNCHRONOUS}, - "async": {true, SYNCHRONOUS}, - "dirsync": {false, DIRSYNC}, - "remount": {false, REMOUNT}, - "mand": {false, MANDLOCK}, - "nomand": {true, MANDLOCK}, - "atime": {true, NOATIME}, - "noatime": {false, NOATIME}, - "diratime": {true, NODIRATIME}, - "nodiratime": {false, NODIRATIME}, - "bind": {false, BIND}, - "rbind": {false, RBIND}, - "unbindable": {false, UNBINDABLE}, - "runbindable": {false, RUNBINDABLE}, - "private": {false, PRIVATE}, - "rprivate": {false, RPRIVATE}, - "shared": {false, SHARED}, - "rshared": {false, RSHARED}, - "slave": {false, SLAVE}, - "rslave": {false, RSLAVE}, - "relatime": {false, RELATIME}, - "norelatime": {true, RELATIME}, - "strictatime": {false, STRICTATIME}, - "nostrictatime": {true, STRICTATIME}, -} - -var validFlags = map[string]bool{ - "": true, - "size": true, - "mode": true, - "uid": true, - "gid": true, - "nr_inodes": true, - "nr_blocks": true, - "mpol": true, -} - -var propagationFlags = map[string]bool{ - "bind": true, - "rbind": true, - "unbindable": true, - "runbindable": true, - "private": true, - "rprivate": true, - "shared": true, - "rshared": true, - "slave": true, - "rslave": true, -} - -// MergeTmpfsOptions merge mount options to make sure there is no duplicate. -func MergeTmpfsOptions(options []string) ([]string, error) { - // We use collisions maps to remove duplicates. - // For flag, the key is the flag value (the key for propagation flag is -1) - // For data=value, the key is the data - flagCollisions := map[int]bool{} - dataCollisions := map[string]bool{} - - var newOptions []string - // We process in reverse order - for i := len(options) - 1; i >= 0; i-- { - option := options[i] - if option == "defaults" { - continue - } - if f, ok := flags[option]; ok && f.flag != 0 { - // There is only one propagation mode - key := f.flag - if propagationFlags[option] { - key = -1 - } - // Check to see if there is collision for flag - if !flagCollisions[key] { - // We prepend the option and add to collision map - newOptions = append([]string{option}, newOptions...) - flagCollisions[key] = true - } - continue - } - opt := strings.SplitN(option, "=", 2) - if len(opt) != 2 || !validFlags[opt[0]] { - return nil, fmt.Errorf("invalid tmpfs option %q", opt) - } - if !dataCollisions[opt[0]] { - // We prepend the option and add to collision map - newOptions = append([]string{option}, newOptions...) 
- dataCollisions[opt[0]] = true - } - } - - return newOptions, nil -} - -// Parse fstab type mount options into mount() flags -// and device specific data -func parseOptions(options string) (int, string) { - var ( - flag int - data []string - ) - - for _, o := range strings.Split(options, ",") { - // If the option does not exist in the flags table or the flag - // is not supported on the platform, - // then it is a data value for a specific fs type - if f, exists := flags[o]; exists && f.flag != 0 { - if f.clear { - flag &= ^f.flag - } else { - flag |= f.flag - } - } else { - data = append(data, o) - } - } - return flag, strings.Join(data, ",") -} diff --git a/vendor/github.com/moby/sys/mount/mount_errors.go b/vendor/github.com/moby/sys/mount/mount_errors.go deleted file mode 100644 index b0d8582e84..0000000000 --- a/vendor/github.com/moby/sys/mount/mount_errors.go +++ /dev/null @@ -1,47 +0,0 @@ -//go:build !darwin && !windows -// +build !darwin,!windows - -package mount - -import "strconv" - -// mountError records an error from mount or unmount operation -type mountError struct { - op string - source, target string - flags uintptr - data string - err error -} - -func (e *mountError) Error() string { - out := e.op + " " - - if e.source != "" { - out += e.source + ":" + e.target - } else { - out += e.target - } - - if e.flags != uintptr(0) { - out += ", flags: 0x" + strconv.FormatUint(uint64(e.flags), 16) - } - if e.data != "" { - out += ", data: " + e.data - } - - out += ": " + e.err.Error() - return out -} - -// Cause returns the underlying cause of the error. -// This is a convention used in github.com/pkg/errors -func (e *mountError) Cause() error { - return e.err -} - -// Unwrap returns the underlying error. -// This is a convention used in golang 1.13+ -func (e *mountError) Unwrap() error { - return e.err -} diff --git a/vendor/github.com/moby/sys/mount/mount_unix.go b/vendor/github.com/moby/sys/mount/mount_unix.go deleted file mode 100644 index 4053fbbeb8..0000000000 --- a/vendor/github.com/moby/sys/mount/mount_unix.go +++ /dev/null @@ -1,88 +0,0 @@ -//go:build !darwin && !windows -// +build !darwin,!windows - -package mount - -import ( - "fmt" - "sort" - - "github.com/moby/sys/mountinfo" - "golang.org/x/sys/unix" -) - -// Mount will mount filesystem according to the specified configuration. -// Options must be specified like the mount or fstab unix commands: -// "opt1=val1,opt2=val2". See flags.go for supported option flags. -func Mount(device, target, mType, options string) error { - flag, data := parseOptions(options) - return mount(device, target, mType, uintptr(flag), data) -} - -// Unmount lazily unmounts a filesystem on supported platforms, otherwise does -// a normal unmount. If target is not a mount point, no error is returned. -func Unmount(target string) error { - err := unix.Unmount(target, mntDetach) - if err == nil || err == unix.EINVAL { //nolint:errorlint // unix errors are bare - // Ignore "not mounted" error here. Note the same error - // can be returned if flags are invalid, so this code - // assumes that the flags value is always correct. - return nil - } - - return &mountError{ - op: "umount", - target: target, - flags: uintptr(mntDetach), - err: err, - } -} - -// RecursiveUnmount unmounts the target and all mounts underneath, starting -// with the deepest mount first. The argument does not have to be a mount -// point itself. 
-func RecursiveUnmount(target string) error { - // Fast path, works if target is a mount point that can be unmounted. - // On Linux, mntDetach flag ensures a recursive unmount. For other - // platforms, if there are submounts, we'll get EBUSY (and fall back - // to the slow path). NOTE we do not ignore EINVAL here as target might - // not be a mount point itself (but there can be mounts underneath). - if err := unix.Unmount(target, mntDetach); err == nil { - return nil - } - - // Slow path: get all submounts, sort, unmount one by one. - mounts, err := mountinfo.GetMounts(mountinfo.PrefixFilter(target)) - if err != nil { - return err - } - - // Make the deepest mount be first - sort.Slice(mounts, func(i, j int) bool { - return len(mounts[i].Mountpoint) > len(mounts[j].Mountpoint) - }) - - var ( - suberr error - lastMount = len(mounts) - 1 - ) - for i, m := range mounts { - err = Unmount(m.Mountpoint) - if err != nil { - if i == lastMount { - if suberr != nil { - return fmt.Errorf("%w (possible cause: %s)", err, suberr) - } - return err - } - // This is a submount, we can ignore the error for now, - // the final unmount will fail if this is a real problem. - // With that in mind, the _first_ failed unmount error - // might be the real error cause, so let's keep it. - if suberr == nil { - suberr = err - } - } - } - return nil -} diff --git a/vendor/github.com/moby/sys/mount/mounter_freebsd.go b/vendor/github.com/moby/sys/mount/mounter_freebsd.go deleted file mode 100644 index 1fffb69015..0000000000 --- a/vendor/github.com/moby/sys/mount/mounter_freebsd.go +++ /dev/null @@ -1,62 +0,0 @@ -//go:build freebsd && cgo -// +build freebsd,cgo - -package mount - -/* -#include -#include -#include -#include -#include -#include -*/ -import "C" - -import ( - "strings" - "syscall" - "unsafe" -) - -func allocateIOVecs(options []string) []C.struct_iovec { - out := make([]C.struct_iovec, len(options)) - for i, option := range options { - out[i].iov_base = unsafe.Pointer(C.CString(option)) - out[i].iov_len = C.size_t(len(option) + 1) - } - return out -} - -func mount(device, target, mType string, flag uintptr, data string) error { - isNullFS := false - - xs := strings.Split(data, ",") - for _, x := range xs { - if x == "bind" { - isNullFS = true - } - } - - options := []string{"fspath", target} - if isNullFS { - options = append(options, "fstype", "nullfs", "target", device) - } else { - options = append(options, "fstype", mType, "from", device) - } - rawOptions := allocateIOVecs(options) - for _, rawOption := range rawOptions { - defer C.free(rawOption.iov_base) - } - - if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 { - return &mountError{ - op: "mount", - source: device, - target: target, - flags: flag, - err: syscall.Errno(errno), - } - } - return nil -} diff --git a/vendor/github.com/moby/sys/mount/mounter_linux.go b/vendor/github.com/moby/sys/mount/mounter_linux.go deleted file mode 100644 index 4e18f4b678..0000000000 --- a/vendor/github.com/moby/sys/mount/mounter_linux.go +++ /dev/null @@ -1,72 +0,0 @@ -package mount - -import ( - "golang.org/x/sys/unix" -) - -const ( - // ptypes is the set propagation types. - ptypes = unix.MS_SHARED | unix.MS_PRIVATE | unix.MS_SLAVE | unix.MS_UNBINDABLE - - // pflags is the full set valid flags for a change propagation call. 
- pflags = ptypes | unix.MS_REC | unix.MS_SILENT - - // broflags is the combination of bind and read only - broflags = unix.MS_BIND | unix.MS_RDONLY -) - -// isremount returns true if either device name or flags identify a remount request, false otherwise. -func isremount(device string, flags uintptr) bool { - switch { - // We treat device "" and "none" as a remount request to provide compatibility with - // requests that don't explicitly set MS_REMOUNT such as those manipulating bind mounts. - case flags&unix.MS_REMOUNT != 0, device == "", device == "none": - return true - default: - return false - } -} - -func mount(device, target, mType string, flags uintptr, data string) error { - oflags := flags &^ ptypes - if !isremount(device, flags) || data != "" { - // Initial call applying all non-propagation flags for mount - // or remount with changed data - if err := unix.Mount(device, target, mType, oflags, data); err != nil { - return &mountError{ - op: "mount", - source: device, - target: target, - flags: oflags, - data: data, - err: err, - } - } - } - - if flags&ptypes != 0 { - // Change the propagation type. - if err := unix.Mount("", target, "", flags&pflags, ""); err != nil { - return &mountError{ - op: "remount", - target: target, - flags: flags & pflags, - err: err, - } - } - } - - if oflags&broflags == broflags { - // Remount the bind to apply read only. - if err := unix.Mount("", target, "", oflags|unix.MS_REMOUNT, ""); err != nil { - return &mountError{ - op: "remount-ro", - target: target, - flags: oflags | unix.MS_REMOUNT, - err: err, - } - } - } - - return nil -} diff --git a/vendor/github.com/moby/sys/mount/mounter_openbsd.go b/vendor/github.com/moby/sys/mount/mounter_openbsd.go deleted file mode 100644 index 3c0718b907..0000000000 --- a/vendor/github.com/moby/sys/mount/mounter_openbsd.go +++ /dev/null @@ -1,78 +0,0 @@ -//go:build openbsd && cgo -// +build openbsd,cgo - -/* - Due to how OpenBSD mount(2) works, filesystem types need to be - supported explicitly since it uses separate structs to pass - filesystem-specific arguments. - - For now only UFS/FFS is supported as it's the default fs - on OpenBSD systems. 
- - See: https://man.openbsd.org/mount.2 -*/ - -package mount - -/* -#include -#include -*/ -import "C" - -import ( - "fmt" - "syscall" - "unsafe" -) - -func createExportInfo(readOnly bool) C.struct_export_args { - exportFlags := C.int(0) - if readOnly { - exportFlags = C.MNT_EXRDONLY - } - out := C.struct_export_args{ - ex_root: 0, - ex_flags: exportFlags, - } - return out -} - -func createUfsArgs(device string, readOnly bool) unsafe.Pointer { - out := &C.struct_ufs_args{ - fspec: C.CString(device), - export_info: createExportInfo(readOnly), - } - return unsafe.Pointer(out) -} - -func mount(device, target, mType string, flag uintptr, data string) error { - readOnly := flag&RDONLY != 0 - - var fsArgs unsafe.Pointer - - switch mType { - case "ffs": - fsArgs = createUfsArgs(device, readOnly) - default: - return &mountError{ - op: "mount", - source: device, - target: target, - flags: flag, - err: fmt.Errorf("unsupported file system type: %s", mType), - } - } - - if errno := C.mount(C.CString(mType), C.CString(target), C.int(flag), fsArgs); errno != 0 { - return &mountError{ - op: "mount", - source: device, - target: target, - flags: flag, - err: syscall.Errno(errno), - } - } - - return nil -} diff --git a/vendor/github.com/moby/sys/mount/mounter_unsupported.go b/vendor/github.com/moby/sys/mount/mounter_unsupported.go deleted file mode 100644 index b69d62bd65..0000000000 --- a/vendor/github.com/moby/sys/mount/mounter_unsupported.go +++ /dev/null @@ -1,8 +0,0 @@ -//go:build (!linux && !freebsd && !openbsd && !windows && !darwin) || (freebsd && !cgo) || (openbsd && !cgo) -// +build !linux,!freebsd,!openbsd,!windows,!darwin freebsd,!cgo openbsd,!cgo - -package mount - -func mount(device, target, mType string, flag uintptr, data string) error { - panic("cgo required on freebsd and openbsd") -} diff --git a/vendor/github.com/moby/sys/mount/sharedsubtree_linux.go b/vendor/github.com/moby/sys/mount/sharedsubtree_linux.go deleted file mode 100644 index 948e6bacd8..0000000000 --- a/vendor/github.com/moby/sys/mount/sharedsubtree_linux.go +++ /dev/null @@ -1,73 +0,0 @@ -package mount - -import "github.com/moby/sys/mountinfo" - -// MakeShared ensures a mounted filesystem has the SHARED mount option enabled. -// See the supported options in flags.go for further reference. -func MakeShared(mountPoint string) error { - return ensureMountedAs(mountPoint, SHARED) -} - -// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. -// See the supported options in flags.go for further reference. -func MakeRShared(mountPoint string) error { - return ensureMountedAs(mountPoint, RSHARED) -} - -// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. -// See the supported options in flags.go for further reference. -func MakePrivate(mountPoint string) error { - return ensureMountedAs(mountPoint, PRIVATE) -} - -// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option -// enabled. See the supported options in flags.go for further reference. -func MakeRPrivate(mountPoint string) error { - return ensureMountedAs(mountPoint, RPRIVATE) -} - -// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. -// See the supported options in flags.go for further reference. -func MakeSlave(mountPoint string) error { - return ensureMountedAs(mountPoint, SLAVE) -} - -// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. 
-// See the supported options in flags.go for further reference. -func MakeRSlave(mountPoint string) error { - return ensureMountedAs(mountPoint, RSLAVE) -} - -// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option -// enabled. See the supported options in flags.go for further reference. -func MakeUnbindable(mountPoint string) error { - return ensureMountedAs(mountPoint, UNBINDABLE) -} - -// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount -// option enabled. See the supported options in flags.go for further reference. -func MakeRUnbindable(mountPoint string) error { - return ensureMountedAs(mountPoint, RUNBINDABLE) -} - -// MakeMount ensures that the file or directory given is a mount point, -// bind mounting it to itself it case it is not. -func MakeMount(mnt string) error { - mounted, err := mountinfo.Mounted(mnt) - if err != nil { - return err - } - if mounted { - return nil - } - - return mount(mnt, mnt, "none", uintptr(BIND), "") -} - -func ensureMountedAs(mnt string, flags int) error { - if err := MakeMount(mnt); err != nil { - return err - } - - return mount("", mnt, "none", uintptr(flags), "") -} diff --git a/vendor/github.com/moby/sys/mount/LICENSE b/vendor/github.com/moby/sys/sequential/LICENSE similarity index 100% rename from vendor/github.com/moby/sys/mount/LICENSE rename to vendor/github.com/moby/sys/sequential/LICENSE diff --git a/vendor/github.com/moby/sys/sequential/doc.go b/vendor/github.com/moby/sys/sequential/doc.go new file mode 100644 index 0000000000..af2817504b --- /dev/null +++ b/vendor/github.com/moby/sys/sequential/doc.go @@ -0,0 +1,15 @@ +// Package sequential provides a set of functions for managing sequential +// files on Windows. +// +// The origin of these functions are the golang OS and windows packages, +// slightly modified to only cope with files, not directories due to the +// specific use case. +// +// The alteration is to allow a file on Windows to be opened with +// FILE_FLAG_SEQUENTIAL_SCAN (particular for docker load), to avoid eating +// the standby list, particularly when accessing large files such as layer.tar. +// +// For non-Windows platforms, the package provides wrappers for the equivalents +// in the os packages. They are passthrough on Unix platforms, and only relevant +// on Windows. +package sequential diff --git a/vendor/github.com/moby/sys/sequential/sequential_unix.go b/vendor/github.com/moby/sys/sequential/sequential_unix.go new file mode 100644 index 0000000000..a3c7340e3a --- /dev/null +++ b/vendor/github.com/moby/sys/sequential/sequential_unix.go @@ -0,0 +1,45 @@ +//go:build !windows +// +build !windows + +package sequential + +import "os" + +// Create creates the named file with mode 0666 (before umask), truncating +// it if it already exists. If successful, methods on the returned +// File can be used for I/O; the associated file descriptor has mode +// O_RDWR. +// If there is an error, it will be of type *PathError. +func Create(name string) (*os.File, error) { + return os.Create(name) +} + +// Open opens the named file for reading. If successful, methods on +// the returned file can be used for reading; the associated file +// descriptor has mode O_RDONLY. +// If there is an error, it will be of type *PathError. +func Open(name string) (*os.File, error) { + return os.Open(name) +} + +// OpenFile is the generalized open call; most users will use Open +// or Create instead. 
It opens the named file with specified flag +// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, +// methods on the returned File can be used for I/O. +// If there is an error, it will be of type *PathError. +func OpenFile(name string, flag int, perm os.FileMode) (*os.File, error) { + return os.OpenFile(name, flag, perm) +} + +// CreateTemp creates a new temporary file in the directory dir +// with a name beginning with prefix, opens the file for reading +// and writing, and returns the resulting *os.File. +// If dir is the empty string, TempFile uses the default directory +// for temporary files (see os.TempDir). +// Multiple programs calling TempFile simultaneously +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. It is the caller's responsibility +// to remove the file when no longer needed. +func CreateTemp(dir, prefix string) (f *os.File, err error) { + return os.CreateTemp(dir, prefix) +} diff --git a/vendor/github.com/moby/sys/sequential/sequential_windows.go b/vendor/github.com/moby/sys/sequential/sequential_windows.go new file mode 100644 index 0000000000..3f7f0d83e0 --- /dev/null +++ b/vendor/github.com/moby/sys/sequential/sequential_windows.go @@ -0,0 +1,161 @@ +package sequential + +import ( + "os" + "path/filepath" + "strconv" + "sync" + "syscall" + "time" + "unsafe" + + "golang.org/x/sys/windows" +) + +// Create creates the named file with mode 0666 (before umask), truncating +// it if it already exists. If successful, methods on the returned +// File can be used for I/O; the associated file descriptor has mode +// O_RDWR. +// If there is an error, it will be of type *PathError. +func Create(name string) (*os.File, error) { + return OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0) +} + +// Open opens the named file for reading. If successful, methods on +// the returned file can be used for reading; the associated file +// descriptor has mode O_RDONLY. +// If there is an error, it will be of type *PathError. +func Open(name string) (*os.File, error) { + return OpenFile(name, os.O_RDONLY, 0) +} + +// OpenFile is the generalized open call; most users will use Open +// or Create instead. +// If there is an error, it will be of type *PathError. 
+func OpenFile(name string, flag int, _ os.FileMode) (*os.File, error) { + if name == "" { + return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT} + } + r, err := openFileSequential(name, flag, 0) + if err == nil { + return r, nil + } + return nil, &os.PathError{Op: "open", Path: name, Err: err} +} + +func openFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) { + r, e := openSequential(name, flag|windows.O_CLOEXEC, 0) + if e != nil { + return nil, e + } + return os.NewFile(uintptr(r), name), nil +} + +func makeInheritSa() *windows.SecurityAttributes { + var sa windows.SecurityAttributes + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + return &sa +} + +func openSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) { + if len(path) == 0 { + return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND + } + pathp, err := windows.UTF16PtrFromString(path) + if err != nil { + return windows.InvalidHandle, err + } + var access uint32 + switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) { + case windows.O_RDONLY: + access = windows.GENERIC_READ + case windows.O_WRONLY: + access = windows.GENERIC_WRITE + case windows.O_RDWR: + access = windows.GENERIC_READ | windows.GENERIC_WRITE + } + if mode&windows.O_CREAT != 0 { + access |= windows.GENERIC_WRITE + } + if mode&windows.O_APPEND != 0 { + access &^= windows.GENERIC_WRITE + access |= windows.FILE_APPEND_DATA + } + sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE) + var sa *windows.SecurityAttributes + if mode&windows.O_CLOEXEC == 0 { + sa = makeInheritSa() + } + var createmode uint32 + switch { + case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL): + createmode = windows.CREATE_NEW + case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC): + createmode = windows.CREATE_ALWAYS + case mode&windows.O_CREAT == windows.O_CREAT: + createmode = windows.OPEN_ALWAYS + case mode&windows.O_TRUNC == windows.O_TRUNC: + createmode = windows.TRUNCATE_EXISTING + default: + createmode = windows.OPEN_EXISTING + } + // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang. + // https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx + const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN + h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0) + return h, e +} + +// Helpers for CreateTemp +var rand uint32 +var randmu sync.Mutex + +func reseed() uint32 { + return uint32(time.Now().UnixNano() + int64(os.Getpid())) +} + +func nextSuffix() string { + randmu.Lock() + r := rand + if r == 0 { + r = reseed() + } + r = r*1664525 + 1013904223 // constants from Numerical Recipes + rand = r + randmu.Unlock() + return strconv.Itoa(int(1e9 + r%1e9))[1:] +} + +// CreateTemp is a copy of os.CreateTemp, modified to use sequential +// file access. Below is the original comment from golang: +// TempFile creates a new temporary file in the directory dir +// with a name beginning with prefix, opens the file for reading +// and writing, and returns the resulting *os.File. +// If dir is the empty string, TempFile uses the default directory +// for temporary files (see os.TempDir). +// Multiple programs calling TempFile simultaneously +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. 
It is the caller's responsibility +// to remove the file when no longer needed. +func CreateTemp(dir, prefix string) (f *os.File, err error) { + if dir == "" { + dir = os.TempDir() + } + + nconflict := 0 + for i := 0; i < 10000; i++ { + name := filepath.Join(dir, prefix+nextSuffix()) + f, err = OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0o600) + if os.IsExist(err) { + if nconflict++; nconflict > 10 { + randmu.Lock() + rand = reseed() + randmu.Unlock() + } + continue + } + break + } + return +} diff --git a/vendor/github.com/moby/term/tc.go b/vendor/github.com/moby/term/tc.go index 65556027a6..8a5e09f584 100644 --- a/vendor/github.com/moby/term/tc.go +++ b/vendor/github.com/moby/term/tc.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package term diff --git a/vendor/github.com/moby/term/term.go b/vendor/github.com/moby/term/term.go index 29c6acf1c7..8c1fc1b5a5 100644 --- a/vendor/github.com/moby/term/term.go +++ b/vendor/github.com/moby/term/term.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows // Package term provides structures and helper functions to work with @@ -14,10 +15,8 @@ import ( "golang.org/x/sys/unix" ) -var ( - // ErrInvalidState is returned if the state of the terminal is invalid. - ErrInvalidState = errors.New("Invalid terminal state") -) +// ErrInvalidState is returned if the state of the terminal is invalid. +var ErrInvalidState = errors.New("Invalid terminal state") // State represents the state of the terminal. type State struct { diff --git a/vendor/github.com/moby/term/term_windows.go b/vendor/github.com/moby/term/term_windows.go index ba82960d4a..3cdc8edbda 100644 --- a/vendor/github.com/moby/term/term_windows.go +++ b/vendor/github.com/moby/term/term_windows.go @@ -66,10 +66,6 @@ func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { } } - // Temporarily use STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and - // STD_ERROR_HANDLE from syscall rather than x/sys/windows as long as - // go-ansiterm hasn't switch to x/sys/windows. 
- // TODO: switch back to x/sys/windows once go-ansiterm has switched if emulateStdin { h := uint32(windows.STD_INPUT_HANDLE) stdIn = windowsconsole.NewAnsiReader(int(h)) diff --git a/vendor/github.com/moby/term/termios.go b/vendor/github.com/moby/term/termios.go index 0f028e2273..99c0f7de60 100644 --- a/vendor/github.com/moby/term/termios.go +++ b/vendor/github.com/moby/term/termios.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package term diff --git a/vendor/github.com/moby/term/termios_bsd.go b/vendor/github.com/moby/term/termios_bsd.go index 922dd4baab..45f77e03c7 100644 --- a/vendor/github.com/moby/term/termios_bsd.go +++ b/vendor/github.com/moby/term/termios_bsd.go @@ -1,3 +1,4 @@ +//go:build darwin || freebsd || openbsd || netbsd // +build darwin freebsd openbsd netbsd package term diff --git a/vendor/github.com/moby/term/termios_nonbsd.go b/vendor/github.com/moby/term/termios_nonbsd.go index 038fd61ba1..88b7b21563 100644 --- a/vendor/github.com/moby/term/termios_nonbsd.go +++ b/vendor/github.com/moby/term/termios_nonbsd.go @@ -1,4 +1,5 @@ -//+build !darwin,!freebsd,!netbsd,!openbsd,!windows +//go:build !darwin && !freebsd && !netbsd && !openbsd && !windows +// +build !darwin,!freebsd,!netbsd,!openbsd,!windows package term diff --git a/vendor/github.com/moby/term/windows/ansi_reader.go b/vendor/github.com/moby/term/windows/ansi_reader.go index 155251521b..f32aa537ef 100644 --- a/vendor/github.com/moby/term/windows/ansi_reader.go +++ b/vendor/github.com/moby/term/windows/ansi_reader.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package windowsconsole @@ -190,7 +191,6 @@ func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) stri // -S Suspends printing on the screen (does not stop the program). // -U Deletes all characters on the current line. Also called the KILL key. 
// -E Quits current command and creates a core - } // +Key generates ESC N Key diff --git a/vendor/github.com/moby/term/windows/ansi_writer.go b/vendor/github.com/moby/term/windows/ansi_writer.go index ccb5ef0775..4243307fd3 100644 --- a/vendor/github.com/moby/term/windows/ansi_writer.go +++ b/vendor/github.com/moby/term/windows/ansi_writer.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package windowsconsole diff --git a/vendor/github.com/moby/term/windows/console.go b/vendor/github.com/moby/term/windows/console.go index 993694ddcd..116b74e8f5 100644 --- a/vendor/github.com/moby/term/windows/console.go +++ b/vendor/github.com/moby/term/windows/console.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package windowsconsole diff --git a/vendor/github.com/moby/term/winsize.go b/vendor/github.com/moby/term/winsize.go index 1ef98d5996..bea8d4595c 100644 --- a/vendor/github.com/moby/term/winsize.go +++ b/vendor/github.com/moby/term/winsize.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package term diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go index fea096c180..07e0f77dc2 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go @@ -78,6 +78,9 @@ func ReleaseLabel(label string) error { // Deprecated: use selinux.DupSecOpt var DupSecOpt = selinux.DupSecOpt +// FormatMountLabel returns a string to be used by the mount command. Using +// the SELinux `context` mount option. Changing labels of files on mount +// points with this option can never be changed. // FormatMountLabel returns a string to be used by the mount command. // The format of this string will be used to alter the labeling of the mountpoint. // The string returned is suitable to be used as the options field of the mount command. @@ -85,12 +88,27 @@ var DupSecOpt = selinux.DupSecOpt // the first parameter. Second parameter is the label that you wish to apply // to all content in the mount point. func FormatMountLabel(src, mountLabel string) string { + return FormatMountLabelByType(src, mountLabel, "context") +} + +// FormatMountLabelByType returns a string to be used by the mount command. +// Allow caller to specify the mount options. For example using the SELinux +// `fscontext` mount option would allow certain container processes to change +// labels of files created on the mount points, where as `context` option does +// not. +// FormatMountLabelByType returns a string to be used by the mount command. +// The format of this string will be used to alter the labeling of the mountpoint. +// The string returned is suitable to be used as the options field of the mount command. +// If you need to have additional mount point options, you can pass them in as +// the first parameter. Second parameter is the label that you wish to apply +// to all content in the mount point. 
+func FormatMountLabelByType(src, mountLabel, contextType string) string { if mountLabel != "" { switch src { case "": - src = fmt.Sprintf("context=%q", mountLabel) + src = fmt.Sprintf("%s=%q", contextType, mountLabel) default: - src = fmt.Sprintf("%s,context=%q", src, mountLabel) + src = fmt.Sprintf("%s,%s=%q", src, contextType, mountLabel) } } return src diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/rchcon.go b/vendor/github.com/opencontainers/selinux/go-selinux/rchcon.go deleted file mode 100644 index 8bff293557..0000000000 --- a/vendor/github.com/opencontainers/selinux/go-selinux/rchcon.go +++ /dev/null @@ -1,34 +0,0 @@ -//go:build linux && go1.16 -// +build linux,go1.16 - -package selinux - -import ( - "errors" - "io/fs" - "os" - - "github.com/opencontainers/selinux/pkg/pwalkdir" -) - -func rchcon(fpath, label string) error { - fastMode := false - // If the current label matches the new label, assume - // other labels are correct. - if cLabel, err := lFileLabel(fpath); err == nil && cLabel == label { - fastMode = true - } - return pwalkdir.Walk(fpath, func(p string, _ fs.DirEntry, _ error) error { - if fastMode { - if cLabel, err := lFileLabel(fpath); err == nil && cLabel == label { - return nil - } - } - e := lSetFileLabel(p, label) - // Walk a file tree can race with removal, so ignore ENOENT. - if errors.Is(e, os.ErrNotExist) { - return nil - } - return e - }) -} diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/rchcon_go115.go b/vendor/github.com/opencontainers/selinux/go-selinux/rchcon_go115.go deleted file mode 100644 index 303cb1890a..0000000000 --- a/vendor/github.com/opencontainers/selinux/go-selinux/rchcon_go115.go +++ /dev/null @@ -1,22 +0,0 @@ -//go:build linux && !go1.16 -// +build linux,!go1.16 - -package selinux - -import ( - "errors" - "os" - - "github.com/opencontainers/selinux/pkg/pwalk" -) - -func rchcon(fpath, label string) error { - return pwalk.Walk(fpath, func(p string, _ os.FileInfo, _ error) error { - e := lSetFileLabel(p, label) - // Walk a file tree can race with removal, so ignore ENOENT. - if errors.Is(e, os.ErrNotExist) { - return nil - } - return e - }) -} diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go index 5a59d151f6..af058b84b1 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go @@ -23,8 +23,13 @@ var ( // ErrEmptyPath is returned when an empty path has been specified. ErrEmptyPath = errors.New("empty path") + // ErrInvalidLabel is returned when an invalid label is specified. + ErrInvalidLabel = errors.New("invalid Label") + // InvalidLabel is returned when an invalid label is specified. - InvalidLabel = errors.New("Invalid Label") + // + // Deprecated: use [ErrInvalidLabel]. + InvalidLabel = ErrInvalidLabel // ErrIncomparable is returned two levels are not comparable ErrIncomparable = errors.New("incomparable levels") @@ -144,7 +149,7 @@ func CalculateGlbLub(sourceRange, targetRange string) (string, error) { // of the program is finished to guarantee another goroutine does not migrate to the current // thread before execution is complete. func SetExecLabel(label string) error { - return setExecLabel(label) + return writeCon(attrPath("exec"), label) } // SetTaskLabel sets the SELinux label for the current thread, or an error. 
@@ -152,21 +157,21 @@ func SetExecLabel(label string) error { // be wrapped in runtime.LockOSThread()/runtime.UnlockOSThread() to guarantee // the current thread does not run in a new mislabeled thread. func SetTaskLabel(label string) error { - return setTaskLabel(label) + return writeCon(attrPath("current"), label) } // SetSocketLabel takes a process label and tells the kernel to assign the // label to the next socket that gets created. Calls to SetSocketLabel // should be wrapped in runtime.LockOSThread()/runtime.UnlockOSThread() until -// the the socket is created to guarantee another goroutine does not migrate +// the socket is created to guarantee another goroutine does not migrate // to the current thread before execution is complete. func SetSocketLabel(label string) error { - return setSocketLabel(label) + return writeCon(attrPath("sockcreate"), label) } // SocketLabel retrieves the current socket label setting func SocketLabel() (string, error) { - return socketLabel() + return readCon(attrPath("sockcreate")) } // PeerLabel retrieves the label of the client on the other side of a socket @@ -185,7 +190,7 @@ func SetKeyLabel(label string) error { // KeyLabel retrieves the current kernel keyring label setting func KeyLabel() (string, error) { - return keyLabel() + return readCon("/proc/self/attr/keycreate") } // Get returns the Context as a string @@ -208,6 +213,11 @@ func ReserveLabel(label string) { reserveLabel(label) } +// MLSEnabled checks if MLS is enabled. +func MLSEnabled() bool { + return isMLSEnabled() +} + // EnforceMode returns the current SELinux mode Enforcing, Permissive, Disabled func EnforceMode() int { return enforceMode() @@ -220,7 +230,7 @@ func SetEnforceMode(mode int) error { } // DefaultEnforceMode returns the systems default SELinux mode Enforcing, -// Permissive or Disabled. Note this is is just the default at boot time. +// Permissive or Disabled. Note this is just the default at boot time. // EnforceMode tells you the systems current mode. func DefaultEnforceMode() int { return defaultEnforceMode() @@ -266,7 +276,7 @@ func CopyLevel(src, dest string) (string, error) { return copyLevel(src, dest) } -// Chcon changes the fpath file object to the SELinux label label. +// Chcon changes the fpath file object to the SELinux label. // If fpath is a directory and recurse is true, then Chcon walks the // directory tree setting the label. // @@ -284,7 +294,7 @@ func DupSecOpt(src string) ([]string, error) { // DisableSecOpt returns a security opt that can be used to disable SELinux // labeling support for future container processes. 
func DisableSecOpt() []string { - return disableSecOpt() + return []string{"disable"} } // GetDefaultContextWithLevel gets a single context for the specified SELinux user diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go index 4582cc9e0a..f1e95977d3 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go @@ -8,16 +8,16 @@ import ( "errors" "fmt" "io" - "io/ioutil" + "io/fs" "math/big" "os" "os/user" - "path" "path/filepath" "strconv" "strings" "sync" + "github.com/opencontainers/selinux/pkg/pwalkdir" "golang.org/x/sys/unix" ) @@ -35,17 +35,17 @@ const ( ) type selinuxState struct { + mcsList map[string]bool + selinuxfs string + selinuxfsOnce sync.Once enabledSet bool enabled bool - selinuxfsOnce sync.Once - selinuxfs string - mcsList map[string]bool sync.Mutex } type level struct { - sens uint cats *big.Int + sens uint } type mlsRange struct { @@ -54,10 +54,10 @@ type mlsRange struct { } type defaultSECtx struct { - user, level, scon string - userRdr, defaultRdr io.Reader - - verifier func(string) error + userRdr io.Reader + verifier func(string) error + defaultRdr io.Reader + user, level, scon string } type levelItem byte @@ -155,7 +155,7 @@ func findSELinuxfs() string { } // check if selinuxfs is available before going the slow path - fs, err := ioutil.ReadFile("/proc/filesystems") + fs, err := os.ReadFile("/proc/filesystems") if err != nil { return "" } @@ -292,7 +292,7 @@ func readCon(fpath string) (string, error) { } func readConFd(in *os.File) (string, error) { - data, err := ioutil.ReadAll(in) + data, err := io.ReadAll(in) if err != nil { return "", err } @@ -305,7 +305,7 @@ func classIndex(class string) (int, error) { permpath := fmt.Sprintf("class/%s/index", class) indexpath := filepath.Join(getSelinuxMountPoint(), permpath) - indexB, err := ioutil.ReadFile(indexpath) + indexB, err := os.ReadFile(indexpath) if err != nil { return -1, err } @@ -391,21 +391,19 @@ func lFileLabel(fpath string) (string, error) { return string(label), nil } -// setFSCreateLabel tells kernel the label to create all file system objects -// created by this task. Setting label="" to return to default. func setFSCreateLabel(label string) error { - return writeAttr("fscreate", label) + return writeCon(attrPath("fscreate"), label) } // fsCreateLabel returns the default label the kernel which the kernel is using // for file system objects created by this task. "" indicates default. func fsCreateLabel() (string, error) { - return readAttr("fscreate") + return readCon(attrPath("fscreate")) } // currentLabel returns the SELinux label of the current process thread, or an error. func currentLabel() (string, error) { - return readAttr("current") + return readCon(attrPath("current")) } // pidLabel returns the SELinux label of the given pid, or an error. @@ -416,7 +414,7 @@ func pidLabel(pid int) (string, error) { // ExecLabel returns the SELinux label that the kernel will use for any programs // that are executed by the current process thread, or an error. 
func execLabel() (string, error) { - return readAttr("exec") + return readCon(attrPath("exec")) } func writeCon(fpath, val string) error { @@ -462,18 +460,10 @@ func attrPath(attr string) string { }) if haveThreadSelf { - return path.Join(threadSelfPrefix, attr) + return filepath.Join(threadSelfPrefix, attr) } - return path.Join("/proc/self/task/", strconv.Itoa(unix.Gettid()), "/attr/", attr) -} - -func readAttr(attr string) (string, error) { - return readCon(attrPath(attr)) -} - -func writeAttr(attr, val string) error { - return writeCon(attrPath(attr), val) + return filepath.Join("/proc/self/task", strconv.Itoa(unix.Gettid()), "attr", attr) } // canonicalizeContext takes a context string and writes it to the kernel @@ -560,30 +550,30 @@ func (l *level) parseLevel(levelStr string) error { // rangeStrToMLSRange marshals a string representation of a range. func rangeStrToMLSRange(rangeStr string) (*mlsRange, error) { - mlsRange := &mlsRange{} - levelSlice := strings.SplitN(rangeStr, "-", 2) + r := &mlsRange{} + l := strings.SplitN(rangeStr, "-", 2) - switch len(levelSlice) { + switch len(l) { // rangeStr that has a low and a high level, e.g. s4:c0.c1023-s6:c0.c1023 case 2: - mlsRange.high = &level{} - if err := mlsRange.high.parseLevel(levelSlice[1]); err != nil { - return nil, fmt.Errorf("failed to parse high level %q: %w", levelSlice[1], err) + r.high = &level{} + if err := r.high.parseLevel(l[1]); err != nil { + return nil, fmt.Errorf("failed to parse high level %q: %w", l[1], err) } fallthrough // rangeStr that is single level, e.g. s6:c0,c3,c5,c30.c1023 case 1: - mlsRange.low = &level{} - if err := mlsRange.low.parseLevel(levelSlice[0]); err != nil { - return nil, fmt.Errorf("failed to parse low level %q: %w", levelSlice[0], err) + r.low = &level{} + if err := r.low.parseLevel(l[0]); err != nil { + return nil, fmt.Errorf("failed to parse low level %q: %w", l[0], err) } } - if mlsRange.high == nil { - mlsRange.high = mlsRange.low + if r.high == nil { + r.high = r.low } - return mlsRange, nil + return r, nil } // bitsetToStr takes a category bitset and returns it in the @@ -617,17 +607,17 @@ func bitsetToStr(c *big.Int) string { return str } -func (l1 *level) equal(l2 *level) bool { - if l2 == nil || l1 == nil { - return l1 == l2 +func (l *level) equal(l2 *level) bool { + if l2 == nil || l == nil { + return l == l2 } - if l1.sens != l2.sens { + if l2.sens != l.sens { return false } - if l2.cats == nil || l1.cats == nil { - return l2.cats == l1.cats + if l2.cats == nil || l.cats == nil { + return l2.cats == l.cats } - return l1.cats.Cmp(l2.cats) == 0 + return l.cats.Cmp(l2.cats) == 0 } // String returns an mlsRange as a string. @@ -721,36 +711,13 @@ func readWriteCon(fpath string, val string) (string, error) { return readConFd(f) } -// setExecLabel sets the SELinux label that the kernel will use for any programs -// that are executed by the current process thread, or an error. -func setExecLabel(label string) error { - return writeAttr("exec", label) -} - -// setTaskLabel sets the SELinux label for the current thread, or an error. -// This requires the dyntransition permission. 
-func setTaskLabel(label string) error { - return writeAttr("current", label) -} - -// setSocketLabel takes a process label and tells the kernel to assign the -// label to the next socket that gets created -func setSocketLabel(label string) error { - return writeAttr("sockcreate", label) -} - -// socketLabel retrieves the current socket label setting -func socketLabel() (string, error) { - return readAttr("sockcreate") -} - // peerLabel retrieves the label of the client on the other side of a socket func peerLabel(fd uintptr) (string, error) { - label, err := unix.GetsockoptString(int(fd), unix.SOL_SOCKET, unix.SO_PEERSEC) + l, err := unix.GetsockoptString(int(fd), unix.SOL_SOCKET, unix.SO_PEERSEC) if err != nil { return "", &os.PathError{Op: "getsockopt", Path: "fd " + strconv.Itoa(int(fd)), Err: err} } - return label, nil + return l, nil } // setKeyLabel takes a process label and tells the kernel to assign the @@ -766,15 +733,10 @@ func setKeyLabel(label string) error { return err } -// keyLabel retrieves the current kernel keyring label setting -func keyLabel() (string, error) { - return readCon("/proc/self/attr/keycreate") -} - // get returns the Context as a string func (c Context) get() string { - if level := c["level"]; level != "" { - return c["user"] + ":" + c["role"] + ":" + c["type"] + ":" + level + if l := c["level"]; l != "" { + return c["user"] + ":" + c["role"] + ":" + c["type"] + ":" + l } return c["user"] + ":" + c["role"] + ":" + c["type"] } @@ -786,7 +748,7 @@ func newContext(label string) (Context, error) { if len(label) != 0 { con := strings.SplitN(label, ":", 4) if len(con) < 3 { - return c, InvalidLabel + return c, ErrInvalidLabel } c["user"] = con[0] c["role"] = con[1] @@ -816,14 +778,23 @@ func reserveLabel(label string) { } func selinuxEnforcePath() string { - return path.Join(getSelinuxMountPoint(), "enforce") + return filepath.Join(getSelinuxMountPoint(), "enforce") +} + +// isMLSEnabled checks if MLS is enabled. +func isMLSEnabled() bool { + enabledB, err := os.ReadFile(filepath.Join(getSelinuxMountPoint(), "mls")) + if err != nil { + return false + } + return bytes.Equal(enabledB, []byte{'1'}) } // enforceMode returns the current SELinux mode Enforcing, Permissive, Disabled func enforceMode() int { var enforce int - enforceB, err := ioutil.ReadFile(selinuxEnforcePath()) + enforceB, err := os.ReadFile(selinuxEnforcePath()) if err != nil { return -1 } @@ -837,11 +808,12 @@ func enforceMode() int { // setEnforceMode sets the current SELinux mode Enforcing, Permissive. // Disabled is not valid, since this needs to be set at boot time. func setEnforceMode(mode int) error { - return ioutil.WriteFile(selinuxEnforcePath(), []byte(strconv.Itoa(mode)), 0o644) + //nolint:gosec // ignore G306: permissions to be 0600 or less. + return os.WriteFile(selinuxEnforcePath(), []byte(strconv.Itoa(mode)), 0o644) } // defaultEnforceMode returns the systems default SELinux mode Enforcing, -// Permissive or Disabled. Note this is is just the default at boot time. +// Permissive or Disabled. Note this is just the default at boot time. // EnforceMode tells you the systems current mode. 
func defaultEnforceMode() int { switch readConfig(selinuxTag) { @@ -941,7 +913,7 @@ func openContextFile() (*os.File, error) { if f, err := os.Open(contextFile); err == nil { return f, nil } - return os.Open(filepath.Join(policyRoot(), "/contexts/lxc_contexts")) + return os.Open(filepath.Join(policyRoot(), "contexts", "lxc_contexts")) } func loadLabels() { @@ -1044,7 +1016,8 @@ func addMcs(processLabel, fileLabel string) (string, string) { // securityCheckContext validates that the SELinux label is understood by the kernel func securityCheckContext(val string) error { - return ioutil.WriteFile(path.Join(getSelinuxMountPoint(), "context"), []byte(val), 0o644) + //nolint:gosec // ignore G306: permissions to be 0600 or less. + return os.WriteFile(filepath.Join(getSelinuxMountPoint(), "context"), []byte(val), 0o644) } // copyLevel returns a label with the MLS/MCS level from src label replaced on @@ -1073,7 +1046,7 @@ func copyLevel(src, dest string) (string, error) { return tcon.Get(), nil } -// chcon changes the fpath file object to the SELinux label label. +// chcon changes the fpath file object to the SELinux label. // If fpath is a directory and recurse is true, then chcon walks the // directory tree setting the label. func chcon(fpath string, label string, recurse bool) error { @@ -1084,7 +1057,7 @@ func chcon(fpath string, label string, recurse bool) error { return nil } - exclude_paths := map[string]bool{ + excludePaths := map[string]bool{ "/": true, "/bin": true, "/boot": true, @@ -1112,19 +1085,19 @@ func chcon(fpath string, label string, recurse bool) error { } if home := os.Getenv("HOME"); home != "" { - exclude_paths[home] = true + excludePaths[home] = true } if sudoUser := os.Getenv("SUDO_USER"); sudoUser != "" { if usr, err := user.Lookup(sudoUser); err == nil { - exclude_paths[usr.HomeDir] = true + excludePaths[usr.HomeDir] = true } } if fpath != "/" { fpath = strings.TrimSuffix(fpath, "/") } - if exclude_paths[fpath] { + if excludePaths[fpath] { return fmt.Errorf("SELinux relabeling of %s is not allowed", fpath) } @@ -1152,6 +1125,28 @@ func chcon(fpath string, label string, recurse bool) error { return rchcon(fpath, label) } +func rchcon(fpath, label string) error { //revive:disable:cognitive-complexity + fastMode := false + // If the current label matches the new label, assume + // other labels are correct. + if cLabel, err := lFileLabel(fpath); err == nil && cLabel == label { + fastMode = true + } + return pwalkdir.Walk(fpath, func(p string, _ fs.DirEntry, _ error) error { + if fastMode { + if cLabel, err := lFileLabel(fpath); err == nil && cLabel == label { + return nil + } + } + err := lSetFileLabel(p, label) + // Walk a file tree can race with removal, so ignore ENOENT. + if errors.Is(err, os.ErrNotExist) { + return nil + } + return err + }) +} + // dupSecOpt takes an SELinux process label and returns security options that // can be used to set the SELinux Type and Level for future container processes. func dupSecOpt(src string) ([]string, error) { @@ -1180,12 +1175,6 @@ func dupSecOpt(src string) ([]string, error) { return dup, nil } -// disableSecOpt returns a security opt that can be used to disable SELinux -// labeling support for future container processes. -func disableSecOpt() []string { - return []string{"disable"} -} - // findUserInContext scans the reader for a valid SELinux context // match that is verified with the verifier. Invalid contexts are // skipped. 
It returns a matched context or an empty string if no diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go index 20d8880312..bc3fd3b370 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go @@ -3,9 +3,20 @@ package selinux -func setDisabled() { +func attrPath(string) string { + return "" +} + +func readCon(fpath string) (string, error) { + return "", nil +} + +func writeCon(string, string) error { + return nil } +func setDisabled() {} + func getEnabled() bool { return false } @@ -62,22 +73,6 @@ func calculateGlbLub(sourceRange, targetRange string) (string, error) { return "", nil } -func setExecLabel(label string) error { - return nil -} - -func setTaskLabel(label string) error { - return nil -} - -func setSocketLabel(label string) error { - return nil -} - -func socketLabel() (string, error) { - return "", nil -} - func peerLabel(fd uintptr) (string, error) { return "", nil } @@ -86,17 +81,12 @@ func setKeyLabel(label string) error { return nil } -func keyLabel() (string, error) { - return "", nil -} - func (c Context) get() string { return "" } func newContext(label string) (Context, error) { - c := make(Context) - return c, nil + return Context{}, nil } func clearLabels() { @@ -105,6 +95,10 @@ func clearLabels() { func reserveLabel(label string) { } +func isMLSEnabled() bool { + return false +} + func enforceMode() int { return Disabled } @@ -152,10 +146,6 @@ func dupSecOpt(src string) ([]string, error) { return nil, nil } -func disableSecOpt() []string { - return []string{"disable"} -} - func getDefaultContextWithLevel(user, level, scon string) (string, error) { return "", nil } diff --git a/vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go b/vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go index 202c80da59..a28b4c4bbb 100644 --- a/vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go +++ b/vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go @@ -8,6 +8,10 @@ import ( "sync" ) +// WalkFunc is the type of the function called by Walk to visit each +// file or directory. It is an alias for [filepath.WalkFunc]. +// +// Deprecated: use [github.com/opencontainers/selinux/pkg/pwalkdir] and [fs.WalkDirFunc]. type WalkFunc = filepath.WalkFunc // Walk is a wrapper for filepath.Walk which can call multiple walkFn @@ -29,6 +33,8 @@ type WalkFunc = filepath.WalkFunc // - if more than one walkFn instance will return an error, only one // of such errors will be propagated and returned by Walk, others // will be silently discarded. +// +// Deprecated: use [github.com/opencontainers/selinux/pkg/pwalkdir.Walk] func Walk(root string, walkFn WalkFunc) error { return WalkN(root, walkFn, runtime.NumCPU()*2) } @@ -38,6 +44,8 @@ func Walk(root string, walkFn WalkFunc) error { // num walkFn will be called at any one time. // // Please see Walk documentation for caveats of using this function. +// +// Deprecated: use [github.com/opencontainers/selinux/pkg/pwalkdir.WalkN] func WalkN(root string, walkFn WalkFunc, num int) error { // make sure limit is sensible if num < 1 { @@ -110,6 +118,6 @@ func WalkN(root string, walkFn WalkFunc, num int) error { // walkArgs holds the arguments that were passed to the Walk or WalkN // functions. 
type walkArgs struct { - path string info *os.FileInfo + path string } diff --git a/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go b/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go index a5796b2c4f..0f5d9f580d 100644 --- a/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go +++ b/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go @@ -111,6 +111,6 @@ func WalkN(root string, walkFn fs.WalkDirFunc, num int) error { // walkArgs holds the arguments that were passed to the Walk or WalkN // functions. type walkArgs struct { - path string entry fs.DirEntry + path string } diff --git a/vendor/github.com/openshift/imagebuilder/dispatchers.go b/vendor/github.com/openshift/imagebuilder/dispatchers.go index 2afbbf1b22..2d2742d73c 100644 --- a/vendor/github.com/openshift/imagebuilder/dispatchers.go +++ b/vendor/github.com/openshift/imagebuilder/dispatchers.go @@ -136,7 +136,7 @@ func label(b *Builder, args []string, attributes map[string]bool, flagArgs []str // func add(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error { if len(args) < 2 { - return errAtLeastOneArgument("ADD") + return errAtLeastTwoArgument("ADD") } var chown string var chmod string @@ -177,7 +177,7 @@ func add(b *Builder, args []string, attributes map[string]bool, flagArgs []strin // func dispatchCopy(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error { if len(args) < 2 { - return errAtLeastOneArgument("COPY") + return errAtLeastTwoArgument("COPY") } last := len(args) - 1 dest := makeAbsolute(args[last], b.RunConfig.WorkingDir) @@ -704,6 +704,10 @@ func errAtLeastOneArgument(command string) error { return fmt.Errorf("%s requires at least one argument", command) } +func errAtLeastTwoArgument(command string) error { + return fmt.Errorf("%s requires at least two arguments", command) +} + func errExactlyOneArgument(command string) error { return fmt.Errorf("%s requires exactly one argument", command) } diff --git a/vendor/github.com/openshift/imagebuilder/dockerfile/parser/parser.go b/vendor/github.com/openshift/imagebuilder/dockerfile/parser/parser.go index b3f4ff4f6d..33c2a8ac50 100644 --- a/vendor/github.com/openshift/imagebuilder/dockerfile/parser/parser.go +++ b/vendor/github.com/openshift/imagebuilder/dockerfile/parser/parser.go @@ -12,8 +12,8 @@ import ( "strings" "unicode" - "github.com/docker/docker/pkg/system" "github.com/openshift/imagebuilder/dockerfile/command" + "github.com/containers/storage/pkg/system" "github.com/pkg/errors" ) diff --git a/vendor/golang.org/x/exp/LICENSE b/vendor/golang.org/x/exp/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/vendor/golang.org/x/exp/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/exp/PATENTS b/vendor/golang.org/x/exp/PATENTS new file mode 100644 index 0000000000..733099041f --- /dev/null +++ b/vendor/golang.org/x/exp/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/exp/constraints/constraints.go b/vendor/golang.org/x/exp/constraints/constraints.go new file mode 100644 index 0000000000..2c033dff47 --- /dev/null +++ b/vendor/golang.org/x/exp/constraints/constraints.go @@ -0,0 +1,50 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package constraints defines a set of useful constraints to be used +// with type parameters. +package constraints + +// Signed is a constraint that permits any signed integer type. +// If future releases of Go add new predeclared signed integer types, +// this constraint will be modified to include them. +type Signed interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 +} + +// Unsigned is a constraint that permits any unsigned integer type. +// If future releases of Go add new predeclared unsigned integer types, +// this constraint will be modified to include them. 
+type Unsigned interface { + ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr +} + +// Integer is a constraint that permits any integer type. +// If future releases of Go add new predeclared integer types, +// this constraint will be modified to include them. +type Integer interface { + Signed | Unsigned +} + +// Float is a constraint that permits any floating-point type. +// If future releases of Go add new predeclared floating-point types, +// this constraint will be modified to include them. +type Float interface { + ~float32 | ~float64 +} + +// Complex is a constraint that permits any complex numeric type. +// If future releases of Go add new predeclared complex numeric types, +// this constraint will be modified to include them. +type Complex interface { + ~complex64 | ~complex128 +} + +// Ordered is a constraint that permits any ordered type: any type +// that supports the operators < <= >= >. +// If future releases of Go add new ordered types, +// this constraint will be modified to include them. +type Ordered interface { + Integer | Float | ~string +} diff --git a/vendor/golang.org/x/exp/maps/maps.go b/vendor/golang.org/x/exp/maps/maps.go new file mode 100644 index 0000000000..ecc0dabb74 --- /dev/null +++ b/vendor/golang.org/x/exp/maps/maps.go @@ -0,0 +1,94 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package maps defines various functions useful with maps of any type. +package maps + +// Keys returns the keys of the map m. +// The keys will be in an indeterminate order. +func Keys[M ~map[K]V, K comparable, V any](m M) []K { + r := make([]K, 0, len(m)) + for k := range m { + r = append(r, k) + } + return r +} + +// Values returns the values of the map m. +// The values will be in an indeterminate order. +func Values[M ~map[K]V, K comparable, V any](m M) []V { + r := make([]V, 0, len(m)) + for _, v := range m { + r = append(r, v) + } + return r +} + +// Equal reports whether two maps contain the same key/value pairs. +// Values are compared using ==. +func Equal[M1, M2 ~map[K]V, K, V comparable](m1 M1, m2 M2) bool { + if len(m1) != len(m2) { + return false + } + for k, v1 := range m1 { + if v2, ok := m2[k]; !ok || v1 != v2 { + return false + } + } + return true +} + +// EqualFunc is like Equal, but compares values using eq. +// Keys are still compared with ==. +func EqualFunc[M1 ~map[K]V1, M2 ~map[K]V2, K comparable, V1, V2 any](m1 M1, m2 M2, eq func(V1, V2) bool) bool { + if len(m1) != len(m2) { + return false + } + for k, v1 := range m1 { + if v2, ok := m2[k]; !ok || !eq(v1, v2) { + return false + } + } + return true +} + +// Clear removes all entries from m, leaving it empty. +func Clear[M ~map[K]V, K comparable, V any](m M) { + for k := range m { + delete(m, k) + } +} + +// Clone returns a copy of m. This is a shallow clone: +// the new keys and values are set using ordinary assignment. +func Clone[M ~map[K]V, K comparable, V any](m M) M { + // Preserve nil in case it matters. + if m == nil { + return nil + } + r := make(M, len(m)) + for k, v := range m { + r[k] = v + } + return r +} + +// Copy copies all key/value pairs in src adding them to dst. +// When a key in src is already present in dst, +// the value in dst will be overwritten by the value associated +// with the key in src. 
+func Copy[M1 ~map[K]V, M2 ~map[K]V, K comparable, V any](dst M1, src M2) { + for k, v := range src { + dst[k] = v + } +} + +// DeleteFunc deletes any key/value pairs from m for which del returns true. +func DeleteFunc[M ~map[K]V, K comparable, V any](m M, del func(K, V) bool) { + for k, v := range m { + if del(k, v) { + delete(m, k) + } + } +} diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go new file mode 100644 index 0000000000..cff0cd49ec --- /dev/null +++ b/vendor/golang.org/x/exp/slices/slices.go @@ -0,0 +1,258 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package slices defines various functions useful with slices of any type. +// Unless otherwise specified, these functions all apply to the elements +// of a slice at index 0 <= i < len(s). +// +// Note that the less function in IsSortedFunc, SortFunc, SortStableFunc requires a +// strict weak ordering (https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings), +// or the sorting may fail to sort correctly. A common case is when sorting slices of +// floating-point numbers containing NaN values. +package slices + +import "golang.org/x/exp/constraints" + +// Equal reports whether two slices are equal: the same length and all +// elements equal. If the lengths are different, Equal returns false. +// Otherwise, the elements are compared in increasing index order, and the +// comparison stops at the first unequal pair. +// Floating point NaNs are not considered equal. +func Equal[E comparable](s1, s2 []E) bool { + if len(s1) != len(s2) { + return false + } + for i := range s1 { + if s1[i] != s2[i] { + return false + } + } + return true +} + +// EqualFunc reports whether two slices are equal using a comparison +// function on each pair of elements. If the lengths are different, +// EqualFunc returns false. Otherwise, the elements are compared in +// increasing index order, and the comparison stops at the first index +// for which eq returns false. +func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool { + if len(s1) != len(s2) { + return false + } + for i, v1 := range s1 { + v2 := s2[i] + if !eq(v1, v2) { + return false + } + } + return true +} + +// Compare compares the elements of s1 and s2. +// The elements are compared sequentially, starting at index 0, +// until one element is not equal to the other. +// The result of comparing the first non-matching elements is returned. +// If both slices are equal until one of them ends, the shorter slice is +// considered less than the longer one. +// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2. +// Comparisons involving floating point NaNs are ignored. +func Compare[E constraints.Ordered](s1, s2 []E) int { + s2len := len(s2) + for i, v1 := range s1 { + if i >= s2len { + return +1 + } + v2 := s2[i] + switch { + case v1 < v2: + return -1 + case v1 > v2: + return +1 + } + } + if len(s1) < s2len { + return -1 + } + return 0 +} + +// CompareFunc is like Compare but uses a comparison function +// on each pair of elements. The elements are compared in increasing +// index order, and the comparisons stop after the first time cmp +// returns non-zero. +// The result is the first non-zero result of cmp; if cmp always +// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2), +// and +1 if len(s1) > len(s2). 
+func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int { + s2len := len(s2) + for i, v1 := range s1 { + if i >= s2len { + return +1 + } + v2 := s2[i] + if c := cmp(v1, v2); c != 0 { + return c + } + } + if len(s1) < s2len { + return -1 + } + return 0 +} + +// Index returns the index of the first occurrence of v in s, +// or -1 if not present. +func Index[E comparable](s []E, v E) int { + for i, vs := range s { + if v == vs { + return i + } + } + return -1 +} + +// IndexFunc returns the first index i satisfying f(s[i]), +// or -1 if none do. +func IndexFunc[E any](s []E, f func(E) bool) int { + for i, v := range s { + if f(v) { + return i + } + } + return -1 +} + +// Contains reports whether v is present in s. +func Contains[E comparable](s []E, v E) bool { + return Index(s, v) >= 0 +} + +// ContainsFunc reports whether at least one +// element e of s satisfies f(e). +func ContainsFunc[E any](s []E, f func(E) bool) bool { + return IndexFunc(s, f) >= 0 +} + +// Insert inserts the values v... into s at index i, +// returning the modified slice. +// In the returned slice r, r[i] == v[0]. +// Insert panics if i is out of range. +// This function is O(len(s) + len(v)). +func Insert[S ~[]E, E any](s S, i int, v ...E) S { + tot := len(s) + len(v) + if tot <= cap(s) { + s2 := s[:tot] + copy(s2[i+len(v):], s[i:]) + copy(s2[i:], v) + return s2 + } + s2 := make(S, tot) + copy(s2, s[:i]) + copy(s2[i:], v) + copy(s2[i+len(v):], s[i:]) + return s2 +} + +// Delete removes the elements s[i:j] from s, returning the modified slice. +// Delete panics if s[i:j] is not a valid slice of s. +// Delete modifies the contents of the slice s; it does not create a new slice. +// Delete is O(len(s)-j), so if many items must be deleted, it is better to +// make a single call deleting them all together than to delete one at a time. +// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those +// elements contain pointers you might consider zeroing those elements so that +// objects they reference can be garbage collected. +func Delete[S ~[]E, E any](s S, i, j int) S { + _ = s[i:j] // bounds check + + return append(s[:i], s[j:]...) +} + +// Replace replaces the elements s[i:j] by the given v, and returns the +// modified slice. Replace panics if s[i:j] is not a valid slice of s. +func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { + _ = s[i:j] // verify that i:j is a valid subslice + tot := len(s[:i]) + len(v) + len(s[j:]) + if tot <= cap(s) { + s2 := s[:tot] + copy(s2[i+len(v):], s[j:]) + copy(s2[i:], v) + return s2 + } + s2 := make(S, tot) + copy(s2, s[:i]) + copy(s2[i:], v) + copy(s2[i+len(v):], s[j:]) + return s2 +} + +// Clone returns a copy of the slice. +// The elements are copied using assignment, so this is a shallow clone. +func Clone[S ~[]E, E any](s S) S { + // Preserve nil in case it matters. + if s == nil { + return nil + } + return append(S([]E{}), s...) +} + +// Compact replaces consecutive runs of equal elements with a single copy. +// This is like the uniq command found on Unix. +// Compact modifies the contents of the slice s; it does not create a new slice. +// When Compact discards m elements in total, it might not modify the elements +// s[len(s)-m:len(s)]. If those elements contain pointers you might consider +// zeroing those elements so that objects they reference can be garbage collected. 
+func Compact[S ~[]E, E comparable](s S) S { + if len(s) < 2 { + return s + } + i := 1 + last := s[0] + for _, v := range s[1:] { + if v != last { + s[i] = v + i++ + last = v + } + } + return s[:i] +} + +// CompactFunc is like Compact but uses a comparison function. +func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { + if len(s) < 2 { + return s + } + i := 1 + last := s[0] + for _, v := range s[1:] { + if !eq(v, last) { + s[i] = v + i++ + last = v + } + } + return s[:i] +} + +// Grow increases the slice's capacity, if necessary, to guarantee space for +// another n elements. After Grow(n), at least n elements can be appended +// to the slice without another allocation. If n is negative or too large to +// allocate the memory, Grow panics. +func Grow[S ~[]E, E any](s S, n int) S { + if n < 0 { + panic("cannot be negative") + } + if n -= cap(s) - len(s); n > 0 { + // TODO(https://go.dev/issue/53888): Make using []E instead of S + // to workaround a compiler bug where the runtime.growslice optimization + // does not take effect. Revert when the compiler is fixed. + s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)] + } + return s +} + +// Clip removes unused capacity from the slice, returning s[:len(s):len(s)]. +func Clip[S ~[]E, E any](s S) S { + return s[:len(s):len(s)] +} diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go new file mode 100644 index 0000000000..f14f40da71 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/sort.go @@ -0,0 +1,126 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +import ( + "math/bits" + + "golang.org/x/exp/constraints" +) + +// Sort sorts a slice of any ordered type in ascending order. +// Sort may fail to sort correctly when sorting slices of floating-point +// numbers containing Not-a-number (NaN) values. +// Use slices.SortFunc(x, func(a, b float64) bool {return a < b || (math.IsNaN(a) && !math.IsNaN(b))}) +// instead if the input may contain NaNs. +func Sort[E constraints.Ordered](x []E) { + n := len(x) + pdqsortOrdered(x, 0, n, bits.Len(uint(n))) +} + +// SortFunc sorts the slice x in ascending order as determined by the less function. +// This sort is not guaranteed to be stable. +// +// SortFunc requires that less is a strict weak ordering. +// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings. +func SortFunc[E any](x []E, less func(a, b E) bool) { + n := len(x) + pdqsortLessFunc(x, 0, n, bits.Len(uint(n)), less) +} + +// SortStableFunc sorts the slice x while keeping the original order of equal +// elements, using less to compare elements. +func SortStableFunc[E any](x []E, less func(a, b E) bool) { + stableLessFunc(x, len(x), less) +} + +// IsSorted reports whether x is sorted in ascending order. +func IsSorted[E constraints.Ordered](x []E) bool { + for i := len(x) - 1; i > 0; i-- { + if x[i] < x[i-1] { + return false + } + } + return true +} + +// IsSortedFunc reports whether x is sorted in ascending order, with less as the +// comparison function. 
+func IsSortedFunc[E any](x []E, less func(a, b E) bool) bool { + for i := len(x) - 1; i > 0; i-- { + if less(x[i], x[i-1]) { + return false + } + } + return true +} + +// BinarySearch searches for target in a sorted slice and returns the position +// where target is found, or the position where target would appear in the +// sort order; it also returns a bool saying whether the target is really found +// in the slice. The slice must be sorted in increasing order. +func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) { + // Inlining is faster than calling BinarySearchFunc with a lambda. + n := len(x) + // Define x[-1] < target and x[n] >= target. + // Invariant: x[i-1] < target, x[j] >= target. + i, j := 0, n + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + if x[h] < target { + i = h + 1 // preserves x[i-1] < target + } else { + j = h // preserves x[j] >= target + } + } + // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i. + return i, i < n && x[i] == target +} + +// BinarySearchFunc works like BinarySearch, but uses a custom comparison +// function. The slice must be sorted in increasing order, where "increasing" is +// defined by cmp. cmp(a, b) is expected to return an integer comparing the two +// parameters: 0 if a == b, a negative number if a < b and a positive number if +// a > b. +func BinarySearchFunc[E, T any](x []E, target T, cmp func(E, T) int) (int, bool) { + n := len(x) + // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 . + // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0. + i, j := 0, n + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + if cmp(x[h], target) < 0 { + i = h + 1 // preserves cmp(x[i - 1], target) < 0 + } else { + j = h // preserves cmp(x[j], target) >= 0 + } + } + // i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i. + return i, i < n && cmp(x[i], target) == 0 +} + +type sortedHint int // hint for pdqsort when choosing the pivot + +const ( + unknownHint sortedHint = iota + increasingHint + decreasingHint +) + +// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf +type xorshift uint64 + +func (r *xorshift) Next() uint64 { + *r ^= *r << 13 + *r ^= *r >> 17 + *r ^= *r << 5 + return uint64(*r) +} + +func nextPowerOfTwo(length int) uint { + return 1 << bits.Len(uint(length)) +} diff --git a/vendor/golang.org/x/exp/slices/zsortfunc.go b/vendor/golang.org/x/exp/slices/zsortfunc.go new file mode 100644 index 0000000000..2a632476c5 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/zsortfunc.go @@ -0,0 +1,479 @@ +// Code generated by gen_sort_variants.go; DO NOT EDIT. + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +// insertionSortLessFunc sorts data[a:b] using insertion sort. +func insertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { + for i := a + 1; i < b; i++ { + for j := i; j > a && less(data[j], data[j-1]); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// siftDownLessFunc implements the heap property on data[lo:hi]. +// first is an offset into the array where the root of the heap lies. 
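A small sketch of how the sort.go entry points above fit together, again assuming Go 1.18+ and the vendored import path; the user type and the literal values are made up for illustration:

package main

import (
	"fmt"
	"math"

	"golang.org/x/exp/slices"
)

type user struct {
	name string
	age  int
}

func main() {
	// Sort works on any constraints.Ordered element type.
	xs := []int{3, 1, 2}
	slices.Sort(xs)
	i, found := slices.BinarySearch(xs, 2)
	fmt.Println(xs, i, found) // [1 2 3] 1 true

	// Per the Sort doc comment, NaNs need a less function that orders them explicitly.
	fs := []float64{math.NaN(), 1, 0}
	slices.SortFunc(fs, func(a, b float64) bool {
		return a < b || (math.IsNaN(a) && !math.IsNaN(b))
	})
	fmt.Println(fs) // [NaN 0 1]

	// BinarySearchFunc takes a three-way cmp; the slice must already be sorted by it.
	us := []user{{"ann", 30}, {"bob", 41}}
	j, ok := slices.BinarySearchFunc(us, 41, func(u user, target int) int {
		return u.age - target
	})
	fmt.Println(j, ok) // 1 true
}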
+func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && less(data[first+child], data[first+child+1]) { + child++ + } + if !less(data[first+root], data[first+child]) { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} + +func heapSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { + first := a + lo := 0 + hi := b - a + + // Build heap with greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDownLessFunc(data, i, hi, first, less) + } + + // Pop elements, largest first, into end of data. + for i := hi - 1; i >= 0; i-- { + data[first], data[first+i] = data[first+i], data[first] + siftDownLessFunc(data, lo, i, first, less) + } +} + +// pdqsortLessFunc sorts data[a:b]. +// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. +// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf +// C++ implementation: https://github.com/orlp/pdqsort +// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ +// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. +func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) { + const maxInsertion = 12 + + var ( + wasBalanced = true // whether the last partitioning was reasonably balanced + wasPartitioned = true // whether the slice was already partitioned + ) + + for { + length := b - a + + if length <= maxInsertion { + insertionSortLessFunc(data, a, b, less) + return + } + + // Fall back to heapsort if too many bad choices were made. + if limit == 0 { + heapSortLessFunc(data, a, b, less) + return + } + + // If the last partitioning was imbalanced, we need to breaking patterns. + if !wasBalanced { + breakPatternsLessFunc(data, a, b, less) + limit-- + } + + pivot, hint := choosePivotLessFunc(data, a, b, less) + if hint == decreasingHint { + reverseRangeLessFunc(data, a, b, less) + // The chosen pivot was pivot-a elements after the start of the array. + // After reversing it is pivot-a elements before the end of the array. + // The idea came from Rust's implementation. + pivot = (b - 1) - (pivot - a) + hint = increasingHint + } + + // The slice is likely already sorted. + if wasBalanced && wasPartitioned && hint == increasingHint { + if partialInsertionSortLessFunc(data, a, b, less) { + return + } + } + + // Probably the slice contains many duplicate elements, partition the slice into + // elements equal to and elements greater than the pivot. + if a > 0 && !less(data[a-1], data[pivot]) { + mid := partitionEqualLessFunc(data, a, b, pivot, less) + a = mid + continue + } + + mid, alreadyPartitioned := partitionLessFunc(data, a, b, pivot, less) + wasPartitioned = alreadyPartitioned + + leftLen, rightLen := mid-a, b-mid + balanceThreshold := length / 8 + if leftLen < rightLen { + wasBalanced = leftLen >= balanceThreshold + pdqsortLessFunc(data, a, mid, limit, less) + a = mid + 1 + } else { + wasBalanced = rightLen >= balanceThreshold + pdqsortLessFunc(data, mid+1, b, limit, less) + b = mid + } + } +} + +// partitionLessFunc does one quicksort partition. +// Let p = data[pivot] +// Moves elements in data[a:b] around, so that data[i]
<p and data[j]>
=p for inewpivot. +// On return, data[newpivot] = p +func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int, alreadyPartitioned bool) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for i <= j && less(data[i], data[a]) { + i++ + } + for i <= j && !less(data[j], data[a]) { + j-- + } + if i > j { + data[j], data[a] = data[a], data[j] + return j, true + } + data[i], data[j] = data[j], data[i] + i++ + j-- + + for { + for i <= j && less(data[i], data[a]) { + i++ + } + for i <= j && !less(data[j], data[a]) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + data[j], data[a] = data[a], data[j] + return j, false +} + +// partitionEqualLessFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. +// It assumed that data[a:b] does not contain elements smaller than the data[pivot]. +func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for { + for i <= j && !less(data[a], data[i]) { + i++ + } + for i <= j && less(data[a], data[j]) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + return i +} + +// partialInsertionSortLessFunc partially sorts a slice, returns true if the slice is sorted at the end. +func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) bool { + const ( + maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted + shortestShifting = 50 // don't shift any elements on short arrays + ) + i := a + 1 + for j := 0; j < maxSteps; j++ { + for i < b && !less(data[i], data[i-1]) { + i++ + } + + if i == b { + return true + } + + if b-a < shortestShifting { + return false + } + + data[i], data[i-1] = data[i-1], data[i] + + // Shift the smaller one to the left. + if i-a >= 2 { + for j := i - 1; j >= 1; j-- { + if !less(data[j], data[j-1]) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + // Shift the greater one to the right. + if b-i >= 2 { + for j := i + 1; j < b; j++ { + if !less(data[j], data[j-1]) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + } + return false +} + +// breakPatternsLessFunc scatters some elements around in an attempt to break some patterns +// that might cause imbalanced partitions in quicksort. +func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { + length := b - a + if length >= 8 { + random := xorshift(length) + modulus := nextPowerOfTwo(length) + + for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ { + other := int(uint(random.Next()) & (modulus - 1)) + if other >= length { + other -= length + } + data[idx], data[a+other] = data[a+other], data[idx] + } + } +} + +// choosePivotLessFunc chooses a pivot in data[a:b]. +// +// [0,8): chooses a static pivot. +// [8,shortestNinther): uses the simple median-of-three method. +// [shortestNinther,∞): uses the Tukey ninther method. 
+func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (pivot int, hint sortedHint) { + const ( + shortestNinther = 50 + maxSwaps = 4 * 3 + ) + + l := b - a + + var ( + swaps int + i = a + l/4*1 + j = a + l/4*2 + k = a + l/4*3 + ) + + if l >= 8 { + if l >= shortestNinther { + // Tukey ninther method, the idea came from Rust's implementation. + i = medianAdjacentLessFunc(data, i, &swaps, less) + j = medianAdjacentLessFunc(data, j, &swaps, less) + k = medianAdjacentLessFunc(data, k, &swaps, less) + } + // Find the median among i, j, k and stores it into j. + j = medianLessFunc(data, i, j, k, &swaps, less) + } + + switch swaps { + case 0: + return j, increasingHint + case maxSwaps: + return j, decreasingHint + default: + return j, unknownHint + } +} + +// order2LessFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. +func order2LessFunc[E any](data []E, a, b int, swaps *int, less func(a, b E) bool) (int, int) { + if less(data[b], data[a]) { + *swaps++ + return b, a + } + return a, b +} + +// medianLessFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. +func medianLessFunc[E any](data []E, a, b, c int, swaps *int, less func(a, b E) bool) int { + a, b = order2LessFunc(data, a, b, swaps, less) + b, c = order2LessFunc(data, b, c, swaps, less) + a, b = order2LessFunc(data, a, b, swaps, less) + return b +} + +// medianAdjacentLessFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. +func medianAdjacentLessFunc[E any](data []E, a int, swaps *int, less func(a, b E) bool) int { + return medianLessFunc(data, a-1, a, a+1, swaps, less) +} + +func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { + i := a + j := b - 1 + for i < j { + data[i], data[j] = data[j], data[i] + i++ + j-- + } +} + +func swapRangeLessFunc[E any](data []E, a, b, n int, less func(a, b E) bool) { + for i := 0; i < n; i++ { + data[a+i], data[b+i] = data[b+i], data[a+i] + } +} + +func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) { + blockSize := 20 // must be > 0 + a, b := 0, blockSize + for b <= n { + insertionSortLessFunc(data, a, b, less) + a = b + b += blockSize + } + insertionSortLessFunc(data, a, n, less) + + for blockSize < n { + a, b = 0, 2*blockSize + for b <= n { + symMergeLessFunc(data, a, a+blockSize, b, less) + a = b + b += 2 * blockSize + } + if m := a + blockSize; m < n { + symMergeLessFunc(data, a, m, n, less) + } + blockSize *= 2 + } +} + +// symMergeLessFunc merges the two sorted subsequences data[a:m] and data[m:b] using +// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum +// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz +// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in +// Computer Science, pages 714-723. Springer, 2004. +// +// Let M = m-a and N = b-n. Wolog M < N. +// The recursion depth is bound by ceil(log(N+M)). +// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. +// The algorithm needs O((M+N)*log(M)) calls to data.Swap. +// +// The paper gives O((M+N)*log(M)) as the number of assignments assuming a +// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation +// in the paper carries through for Swap operations, especially as the block +// swapping rotate uses only O(M+N) Swaps. +// +// symMerge assumes non-degenerate arguments: a < m && m < b. 
+// Having the caller check this condition eliminates many leaf recursion calls, +// which improves performance. +func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[a] into data[m:b] + // if data[a:m] only contains one element. + if m-a == 1 { + // Use binary search to find the lowest index i + // such that data[i] >= data[a] for m <= i < b. + // Exit the search loop with i == b in case no such index exists. + i := m + j := b + for i < j { + h := int(uint(i+j) >> 1) + if less(data[h], data[a]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[a] reaches the position before i. + for k := a; k < i-1; k++ { + data[k], data[k+1] = data[k+1], data[k] + } + return + } + + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[m] into data[a:m] + // if data[m:b] only contains one element. + if b-m == 1 { + // Use binary search to find the lowest index i + // such that data[i] > data[m] for a <= i < m. + // Exit the search loop with i == m in case no such index exists. + i := a + j := m + for i < j { + h := int(uint(i+j) >> 1) + if !less(data[m], data[h]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[m] reaches the position i. + for k := m; k > i; k-- { + data[k], data[k-1] = data[k-1], data[k] + } + return + } + + mid := int(uint(a+b) >> 1) + n := mid + m + var start, r int + if m > mid { + start = n - b + r = mid + } else { + start = a + r = m + } + p := n - 1 + + for start < r { + c := int(uint(start+r) >> 1) + if !less(data[p-c], data[c]) { + start = c + 1 + } else { + r = c + } + } + + end := n - start + if start < m && m < end { + rotateLessFunc(data, start, m, end, less) + } + if a < start && start < mid { + symMergeLessFunc(data, a, start, mid, less) + } + if mid < end && end < b { + symMergeLessFunc(data, mid, end, b, less) + } +} + +// rotateLessFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: +// Data of the form 'x u v y' is changed to 'x v u y'. +// rotate performs at most b-a many calls to data.Swap, +// and it assumes non-degenerate arguments: a < m && m < b. +func rotateLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { + i := m - a + j := b - m + + for i != j { + if i > j { + swapRangeLessFunc(data, m-i, m, j, less) + i -= j + } else { + swapRangeLessFunc(data, m-i, m+j-i, i, less) + j -= i + } + } + // i == j + swapRangeLessFunc(data, m-i, m, i, less) +} diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go new file mode 100644 index 0000000000..efaa1c8b71 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/zsortordered.go @@ -0,0 +1,481 @@ +// Code generated by gen_sort_variants.go; DO NOT EDIT. + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +import "golang.org/x/exp/constraints" + +// insertionSortOrdered sorts data[a:b] using insertion sort. +func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) { + for i := a + 1; i < b; i++ { + for j := i; j > a && (data[j] < data[j-1]); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// siftDownOrdered implements the heap property on data[lo:hi]. +// first is an offset into the array where the root of the heap lies. 
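A brief sketch of the stability guarantee that SortStableFunc (backed by the symMerge-based stable sort above) adds over SortFunc; the entry type and data are assumed examples, not from the patch:

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

type entry struct {
	key string
	seq int // original position, to make stability visible
}

func main() {
	es := []entry{{"b", 0}, {"a", 1}, {"b", 2}, {"a", 3}}

	// SortStableFunc keeps equal keys in their original relative order.
	slices.SortStableFunc(es, func(x, y entry) bool { return x.key < y.key })
	fmt.Println(es) // [{a 1} {a 3} {b 0} {b 2}]

	// IsSortedFunc verifies the ordering with the same less function.
	fmt.Println(slices.IsSortedFunc(es, func(x, y entry) bool { return x.key < y.key })) // true
}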
+func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && (data[first+child] < data[first+child+1]) { + child++ + } + if !(data[first+root] < data[first+child]) { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} + +func heapSortOrdered[E constraints.Ordered](data []E, a, b int) { + first := a + lo := 0 + hi := b - a + + // Build heap with greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDownOrdered(data, i, hi, first) + } + + // Pop elements, largest first, into end of data. + for i := hi - 1; i >= 0; i-- { + data[first], data[first+i] = data[first+i], data[first] + siftDownOrdered(data, lo, i, first) + } +} + +// pdqsortOrdered sorts data[a:b]. +// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. +// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf +// C++ implementation: https://github.com/orlp/pdqsort +// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ +// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. +func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) { + const maxInsertion = 12 + + var ( + wasBalanced = true // whether the last partitioning was reasonably balanced + wasPartitioned = true // whether the slice was already partitioned + ) + + for { + length := b - a + + if length <= maxInsertion { + insertionSortOrdered(data, a, b) + return + } + + // Fall back to heapsort if too many bad choices were made. + if limit == 0 { + heapSortOrdered(data, a, b) + return + } + + // If the last partitioning was imbalanced, we need to breaking patterns. + if !wasBalanced { + breakPatternsOrdered(data, a, b) + limit-- + } + + pivot, hint := choosePivotOrdered(data, a, b) + if hint == decreasingHint { + reverseRangeOrdered(data, a, b) + // The chosen pivot was pivot-a elements after the start of the array. + // After reversing it is pivot-a elements before the end of the array. + // The idea came from Rust's implementation. + pivot = (b - 1) - (pivot - a) + hint = increasingHint + } + + // The slice is likely already sorted. + if wasBalanced && wasPartitioned && hint == increasingHint { + if partialInsertionSortOrdered(data, a, b) { + return + } + } + + // Probably the slice contains many duplicate elements, partition the slice into + // elements equal to and elements greater than the pivot. + if a > 0 && !(data[a-1] < data[pivot]) { + mid := partitionEqualOrdered(data, a, b, pivot) + a = mid + continue + } + + mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot) + wasPartitioned = alreadyPartitioned + + leftLen, rightLen := mid-a, b-mid + balanceThreshold := length / 8 + if leftLen < rightLen { + wasBalanced = leftLen >= balanceThreshold + pdqsortOrdered(data, a, mid, limit) + a = mid + 1 + } else { + wasBalanced = rightLen >= balanceThreshold + pdqsortOrdered(data, mid+1, b, limit) + b = mid + } + } +} + +// partitionOrdered does one quicksort partition. +// Let p = data[pivot] +// Moves elements in data[a:b] around, so that data[i]
<p and data[j]>
=p for inewpivot. +// On return, data[newpivot] = p +func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for i <= j && (data[i] < data[a]) { + i++ + } + for i <= j && !(data[j] < data[a]) { + j-- + } + if i > j { + data[j], data[a] = data[a], data[j] + return j, true + } + data[i], data[j] = data[j], data[i] + i++ + j-- + + for { + for i <= j && (data[i] < data[a]) { + i++ + } + for i <= j && !(data[j] < data[a]) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + data[j], data[a] = data[a], data[j] + return j, false +} + +// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. +// It assumed that data[a:b] does not contain elements smaller than the data[pivot]. +func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for { + for i <= j && !(data[a] < data[i]) { + i++ + } + for i <= j && (data[a] < data[j]) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + return i +} + +// partialInsertionSortOrdered partially sorts a slice, returns true if the slice is sorted at the end. +func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool { + const ( + maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted + shortestShifting = 50 // don't shift any elements on short arrays + ) + i := a + 1 + for j := 0; j < maxSteps; j++ { + for i < b && !(data[i] < data[i-1]) { + i++ + } + + if i == b { + return true + } + + if b-a < shortestShifting { + return false + } + + data[i], data[i-1] = data[i-1], data[i] + + // Shift the smaller one to the left. + if i-a >= 2 { + for j := i - 1; j >= 1; j-- { + if !(data[j] < data[j-1]) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + // Shift the greater one to the right. + if b-i >= 2 { + for j := i + 1; j < b; j++ { + if !(data[j] < data[j-1]) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + } + return false +} + +// breakPatternsOrdered scatters some elements around in an attempt to break some patterns +// that might cause imbalanced partitions in quicksort. +func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) { + length := b - a + if length >= 8 { + random := xorshift(length) + modulus := nextPowerOfTwo(length) + + for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ { + other := int(uint(random.Next()) & (modulus - 1)) + if other >= length { + other -= length + } + data[idx], data[a+other] = data[a+other], data[idx] + } + } +} + +// choosePivotOrdered chooses a pivot in data[a:b]. +// +// [0,8): chooses a static pivot. +// [8,shortestNinther): uses the simple median-of-three method. +// [shortestNinther,∞): uses the Tukey ninther method. +func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) { + const ( + shortestNinther = 50 + maxSwaps = 4 * 3 + ) + + l := b - a + + var ( + swaps int + i = a + l/4*1 + j = a + l/4*2 + k = a + l/4*3 + ) + + if l >= 8 { + if l >= shortestNinther { + // Tukey ninther method, the idea came from Rust's implementation. 
+ i = medianAdjacentOrdered(data, i, &swaps) + j = medianAdjacentOrdered(data, j, &swaps) + k = medianAdjacentOrdered(data, k, &swaps) + } + // Find the median among i, j, k and stores it into j. + j = medianOrdered(data, i, j, k, &swaps) + } + + switch swaps { + case 0: + return j, increasingHint + case maxSwaps: + return j, decreasingHint + default: + return j, unknownHint + } +} + +// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. +func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) { + if data[b] < data[a] { + *swaps++ + return b, a + } + return a, b +} + +// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. +func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int { + a, b = order2Ordered(data, a, b, swaps) + b, c = order2Ordered(data, b, c, swaps) + a, b = order2Ordered(data, a, b, swaps) + return b +} + +// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. +func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int { + return medianOrdered(data, a-1, a, a+1, swaps) +} + +func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) { + i := a + j := b - 1 + for i < j { + data[i], data[j] = data[j], data[i] + i++ + j-- + } +} + +func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) { + for i := 0; i < n; i++ { + data[a+i], data[b+i] = data[b+i], data[a+i] + } +} + +func stableOrdered[E constraints.Ordered](data []E, n int) { + blockSize := 20 // must be > 0 + a, b := 0, blockSize + for b <= n { + insertionSortOrdered(data, a, b) + a = b + b += blockSize + } + insertionSortOrdered(data, a, n) + + for blockSize < n { + a, b = 0, 2*blockSize + for b <= n { + symMergeOrdered(data, a, a+blockSize, b) + a = b + b += 2 * blockSize + } + if m := a + blockSize; m < n { + symMergeOrdered(data, a, m, n) + } + blockSize *= 2 + } +} + +// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using +// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum +// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz +// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in +// Computer Science, pages 714-723. Springer, 2004. +// +// Let M = m-a and N = b-n. Wolog M < N. +// The recursion depth is bound by ceil(log(N+M)). +// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. +// The algorithm needs O((M+N)*log(M)) calls to data.Swap. +// +// The paper gives O((M+N)*log(M)) as the number of assignments assuming a +// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation +// in the paper carries through for Swap operations, especially as the block +// swapping rotate uses only O(M+N) Swaps. +// +// symMerge assumes non-degenerate arguments: a < m && m < b. +// Having the caller check this condition eliminates many leaf recursion calls, +// which improves performance. +func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[a] into data[m:b] + // if data[a:m] only contains one element. + if m-a == 1 { + // Use binary search to find the lowest index i + // such that data[i] >= data[a] for m <= i < b. + // Exit the search loop with i == b in case no such index exists. 
+ i := m + j := b + for i < j { + h := int(uint(i+j) >> 1) + if data[h] < data[a] { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[a] reaches the position before i. + for k := a; k < i-1; k++ { + data[k], data[k+1] = data[k+1], data[k] + } + return + } + + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[m] into data[a:m] + // if data[m:b] only contains one element. + if b-m == 1 { + // Use binary search to find the lowest index i + // such that data[i] > data[m] for a <= i < m. + // Exit the search loop with i == m in case no such index exists. + i := a + j := m + for i < j { + h := int(uint(i+j) >> 1) + if !(data[m] < data[h]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[m] reaches the position i. + for k := m; k > i; k-- { + data[k], data[k-1] = data[k-1], data[k] + } + return + } + + mid := int(uint(a+b) >> 1) + n := mid + m + var start, r int + if m > mid { + start = n - b + r = mid + } else { + start = a + r = m + } + p := n - 1 + + for start < r { + c := int(uint(start+r) >> 1) + if !(data[p-c] < data[c]) { + start = c + 1 + } else { + r = c + } + } + + end := n - start + if start < m && m < end { + rotateOrdered(data, start, m, end) + } + if a < start && start < mid { + symMergeOrdered(data, a, start, mid) + } + if mid < end && end < b { + symMergeOrdered(data, mid, end, b) + } +} + +// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: +// Data of the form 'x u v y' is changed to 'x v u y'. +// rotate performs at most b-a many calls to data.Swap, +// and it assumes non-degenerate arguments: a < m && m < b. +func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) { + i := m - a + j := b - m + + for i != j { + if i > j { + swapRangeOrdered(data, m-i, m, j) + i -= j + } else { + swapRangeOrdered(data, m-i, m+j-i, i) + j -= i + } + } + // i == j + swapRangeOrdered(data, m-i, m, i) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index df03b87edd..dddeab7623 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -78,7 +78,6 @@ github.com/containerd/containerd/errdefs github.com/containerd/containerd/log github.com/containerd/containerd/pkg/userns github.com/containerd/containerd/platforms -github.com/containerd/containerd/sys # github.com/containerd/stargz-snapshotter/estargz v0.14.1 ## explicit; go 1.19 github.com/containerd/stargz-snapshotter/estargz @@ -175,8 +174,8 @@ github.com/containers/common/version # github.com/containers/conmon v2.0.20+incompatible ## explicit github.com/containers/conmon/runner/config -# github.com/containers/image/v5 v5.24.1-0.20230202144111-a49c94a010be -## explicit; go 1.17 +# github.com/containers/image/v5 v5.24.1-0.20230208161124-34839152eb48 +## explicit; go 1.18 github.com/containers/image/v5/copy github.com/containers/image/v5/directory github.com/containers/image/v5/directory/explicitfilepath @@ -201,6 +200,7 @@ github.com/containers/image/v5/internal/pkg/platform github.com/containers/image/v5/internal/private github.com/containers/image/v5/internal/putblobdigest github.com/containers/image/v5/internal/rootless +github.com/containers/image/v5/internal/set github.com/containers/image/v5/internal/signature github.com/containers/image/v5/internal/signer github.com/containers/image/v5/internal/streamdigest @@ -374,7 +374,7 @@ 
github.com/docker/distribution/reference github.com/docker/distribution/registry/api/errcode github.com/docker/distribution/registry/api/v2 github.com/docker/distribution/registry/client/auth/challenge -# github.com/docker/docker v20.10.23+incompatible +# github.com/docker/docker v23.0.0+incompatible ## explicit github.com/docker/docker/api github.com/docker/docker/api/types @@ -533,7 +533,7 @@ github.com/google/gofuzz/bytesource # github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 ## explicit; go 1.13 github.com/google/shlex -# github.com/google/trillian v1.5.0 +# github.com/google/trillian v1.5.1-0.20220819043421-0a389c4bb8d9 ## explicit; go 1.17 github.com/google/trillian github.com/google/trillian/types @@ -632,14 +632,17 @@ github.com/mistifyio/go-zfs/v3 # github.com/mitchellh/mapstructure v1.5.0 ## explicit; go 1.14 github.com/mitchellh/mapstructure -# github.com/moby/sys/mount v0.3.3 -## explicit; go 1.16 -github.com/moby/sys/mount +# github.com/moby/patternmatcher v0.5.0 +## explicit; go 1.19 +github.com/moby/patternmatcher # github.com/moby/sys/mountinfo v0.6.2 ## explicit; go 1.16 github.com/moby/sys/mountinfo -# github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 -## explicit; go 1.13 +# github.com/moby/sys/sequential v0.5.0 +## explicit; go 1.17 +github.com/moby/sys/sequential +# github.com/moby/term v0.0.0-20221120202655-abb19827d345 +## explicit; go 1.18 github.com/moby/term github.com/moby/term/windows # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd @@ -730,13 +733,13 @@ github.com/opencontainers/runtime-tools/generate github.com/opencontainers/runtime-tools/generate/seccomp github.com/opencontainers/runtime-tools/specerror github.com/opencontainers/runtime-tools/validate -# github.com/opencontainers/selinux v1.10.2 -## explicit; go 1.13 +# github.com/opencontainers/selinux v1.11.0 +## explicit; go 1.19 github.com/opencontainers/selinux/go-selinux github.com/opencontainers/selinux/go-selinux/label github.com/opencontainers/selinux/pkg/pwalk github.com/opencontainers/selinux/pkg/pwalkdir -# github.com/openshift/imagebuilder v1.2.4-0.20220711175835-4151e43600df +# github.com/openshift/imagebuilder v1.2.4-0.20230207193036-6e08c897da73 ## explicit; go 1.16 github.com/openshift/imagebuilder github.com/openshift/imagebuilder/dockerfile/command @@ -830,8 +833,8 @@ github.com/stefanberger/go-pkcs11uri ## explicit; go 1.13 github.com/stretchr/testify/assert github.com/stretchr/testify/require -# github.com/sylabs/sif/v2 v2.9.0 -## explicit; go 1.18 +# github.com/sylabs/sif/v2 v2.9.1 +## explicit; go 1.19 github.com/sylabs/sif/v2/pkg/sif # github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 ## explicit @@ -934,6 +937,11 @@ golang.org/x/crypto/ssh golang.org/x/crypto/ssh/agent golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/ssh/knownhosts +# golang.org/x/exp v0.0.0-20230206171751-46f607a40771 +## explicit; go 1.18 +golang.org/x/exp/constraints +golang.org/x/exp/maps +golang.org/x/exp/slices # golang.org/x/mod v0.7.0 ## explicit; go 1.17 golang.org/x/mod/semver
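A hedged illustration of the kind of call site the newly vendored golang.org/x/exp packages (constraints, maps, slices) enable; the map contents here are hypothetical and not taken from this repository:

package main

import (
	"fmt"

	"golang.org/x/exp/maps"
	"golang.org/x/exp/slices"
)

func main() {
	labels := map[string]string{"io.podman": "true", "arch": "amd64"}

	// maps.Keys returns the keys in unspecified order; slices.Sort makes the
	// output deterministic, e.g. for stable log lines or API responses.
	keys := maps.Keys(labels)
	slices.Sort(keys)
	fmt.Println(keys) // [arch io.podman]
}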